glance-29.0.0/.coveragerc
=========================

[run]
branch = True
source = glance
omit = glance/tests/*

[report]
ignore_errors = True

glance-29.0.0/.mailmap
======================

# Format is:
#
#
Zhongyue Luo
Zhenguo Niu
David Koo

glance-29.0.0/.stestr.conf
==========================

[DEFAULT]
test_path=${TEST_PATH:-./glance/tests/unit}
top_dir=./
group_regex=(glance\.tests\.functional\.serial\.[^.]+\.)

glance-29.0.0/.zuul.yaml
========================

- job:
    name: glance-tox-oslo-tips-base
    parent: tox
    abstract: true
    nodeset: ubuntu-jammy
    timeout: 2400
    description: Abstract job for Glance vs. oslo libraries
    # NOTE(rosmaita): we only need functional test jobs, oslo is
    # already running periodic jobs using our unit tests. Those
    # jobs are configured for glance in openstack/project-config/
    # zuul.d/projects.yaml using the template 'periodic-jobs-with-oslo-master'
    # which is defined in openstack/openstack-zuul-jobs/zuul.d/
    # project-templates.yaml; the jobs the template refers to are
    # defined in openstack/openstack-zuul-jobs/zuul.d/jobs.yaml
    required-projects:
      - name: openstack/debtcollector
      - name: openstack/futurist
      - name: openstack/oslo.concurrency
      - name: openstack/oslo.config
      - name: openstack/oslo.context
      - name: openstack/oslo.db
      - name: openstack/oslo.i18n
      - name: openstack/oslo.log
      - name: openstack/oslo.messaging
      - name: openstack/oslo.middleware
      - name: openstack/oslo.policy
      - name: openstack/oslo.utils
      - name: openstack/osprofiler
      - name: openstack/stevedore
      - name: openstack/taskflow

- job:
    name: glance-tox-functional-py312-oslo-tips
    parent: glance-tox-oslo-tips-base
    description: |
      Glance py312 functional tests vs. oslo libraries masters
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: functional-py312
      python_use_pyenv: True

- job:
    name: glance-tox-functional-py311-oslo-tips
    parent: glance-tox-oslo-tips-base
    description: |
      Glance py311 functional tests vs. oslo libraries masters
    vars:
      python_version: 3.11
      tox_envlist: functional-py311

- job:
    name: glance-tox-keystone-tips-base
    parent: tox
    abstract: true
    nodeset: ubuntu-jammy
    timeout: 2400
    description: Abstract job for Glance vs. keystone
    required-projects:
      - name: openstack/keystoneauth
      - name: openstack/keystonemiddleware
      - name: openstack/python-keystoneclient

- job:
    name: glance-tox-py312-keystone-tips
    parent: glance-tox-keystone-tips-base
    description: |
      Glance py312 unit tests vs. keystone masters
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: py312
      python_use_pyenv: True

- job:
    name: glance-tox-py311-keystone-tips
    parent: glance-tox-keystone-tips-base
    description: |
      Glance py311 unit tests vs.
      keystone masters
    vars:
      python_version: 3.11
      tox_envlist: py311

- job:
    name: glance-tox-functional-py312-keystone-tips
    parent: glance-tox-keystone-tips-base
    description: |
      Glance py312 functional tests vs. keystone masters
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: functional-py312
      python_use_pyenv: True

- job:
    name: glance-tox-functional-py311-keystone-tips
    parent: glance-tox-keystone-tips-base
    description: |
      Glance py311 functional tests vs. keystone masters
    vars:
      python_version: 3.11
      tox_envlist: functional-py311

- job:
    name: glance-tox-glance_store-tips-base
    parent: tox
    abstract: true
    nodeset: ubuntu-jammy
    timeout: 2400
    description: Abstract job for Glance vs. glance_store
    required-projects:
      - name: openstack/glance_store

- job:
    name: glance-tox-py312-glance_store-tips
    parent: glance-tox-glance_store-tips-base
    description: |
      Glance py312 unit tests vs. glance_store master
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: py312
      python_use_pyenv: True

- job:
    name: glance-tox-py311-glance_store-tips
    parent: glance-tox-glance_store-tips-base
    description: |
      Glance py311 unit tests vs. glance_store master
    vars:
      python_version: 3.11
      tox_envlist: py311

- job:
    name: glance-tox-functional-py312-glance_store-tips
    parent: glance-tox-glance_store-tips-base
    description: |
      Glance py312 functional tests vs. glance_store master
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: functional-py312
      python_use_pyenv: True

- job:
    name: glance-tox-functional-py311-glance_store-tips
    parent: glance-tox-glance_store-tips-base
    description: |
      Glance py311 functional tests vs. glance_store master
    vars:
      python_version: 3.11
      tox_envlist: functional-py311

- job:
    name: glance-tox-cursive-tips-base
    parent: tox
    abstract: true
    nodeset: ubuntu-jammy
    timeout: 2400
    description: Abstract job for Glance vs. cursive and related libs
    required-projects:
      - name: x/cursive
      - name: openstack/python-barbicanclient
      - name: openstack/castellan

- job:
    name: glance-tox-py312-cursive-tips
    parent: glance-tox-cursive-tips-base
    description: |
      Glance py312 unit tests vs. cursive (and related libs) master
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: py312
      python_use_pyenv: True

- job:
    name: glance-tox-py311-cursive-tips
    parent: glance-tox-cursive-tips-base
    description: |
      Glance py311 unit tests vs. cursive (and related libs) master
    vars:
      python_version: 3.11
      tox_envlist: py311

- job:
    name: glance-tox-functional-py312-cursive-tips
    parent: glance-tox-cursive-tips-base
    description: |
      Glance py312 functional tests vs. cursive (and related libs) master
    nodeset: debian-bookworm
    vars:
      python_version: 3.12
      tox_envlist: functional-py312
      python_use_pyenv: True

- job:
    name: glance-tox-functional-py311-cursive-tips
    parent: glance-tox-cursive-tips-base
    description: |
      Glance py311 functional tests vs.
      cursive (and related libs) master
    vars:
      python_version: 3.11
      tox_envlist: functional-py311

- job:
    name: tempest-integrated-storage-import
    parent: tempest-integrated-storage
    description: |
      The regular tempest-integrated-storage job but with glance
      metadata injection
    post-run: playbooks/post-check-metadata-injection.yaml
    timeout: 10800
    vars:
      configure_swap_size: 8192
      tempest_concurrency: 3
      zuul_copy_output:
        /etc/glance-remote: logs
      devstack_localrc:
        GLANCE_STANDALONE: False
        GLANCE_USE_IMPORT_WORKFLOW: True
      devstack_services:
        g-api-r: true
      devstack_local_conf:
        post-config:
          $GLANCE_API_CONF:
            DEFAULT:
              enabled_import_methods: "[\"copy-image\", \"glance-direct\"]"
            wsgi:
              python_interpreter: /opt/stack/data/venv/bin/python
          $GLANCE_IMAGE_IMPORT_CONF:
            image_import_opts:
              image_import_plugins: "['inject_image_metadata', 'image_conversion']"
            inject_metadata_properties:
              ignore_user_roles:
              inject: |
                "glance_devstack_test":"doyouseeme?"
            image_conversion:
              output_format: raw
        test-config:
          "$TEMPEST_CONFIG":
            image:
              image_caching_enabled: True

- job:
    name: glance-centralized-cache
    parent: tempest-integrated-storage-import
    description: |
      The regular job to test with glance centralized cache driver
    vars:
      devstack_local_conf:
        post-config:
          $GLANCE_API_CONF:
            DEFAULT:
              image_cache_driver: "centralized_db"

- job:
    name: glance-grenade-centralized-cache
    parent: grenade-multinode
    description: |
      Glance grenade multinode job where old glance will use sqlite as
      cache driver and new glance will use centralized_db as cache driver.
    required-projects:
      - opendev.org/openstack/grenade
      - opendev.org/openstack/glance
    vars:
      # NOTE(abhishekk): We always want base devstack to install from
      # stable/2023.2 where 'sqlite' is the default cache driver, so that
      # on upgrade we can verify that cache data is transferred from
      # sqlite to the central database. We will remove this job in the 'E'
      # cycle when the 'sqlite' cache driver is removed from glance.
      grenade_from_branch: stable/2023.2
      grenade_devstack_localrc:
        shared:
          GLANCE_STANDALONE: False
          GLANCE_USE_IMPORT_WORKFLOW: True
      devstack_local_conf:
        test-config:
          "$TEMPEST_CONFIG":
            image:
              image_caching_enabled: True
      tox_envlist: all
      tempest_test_regex: tempest.api.image.v2.admin.test_image_caching

- job:
    name: glance-multistore-cinder-import
    parent: tempest-integrated-storage-import
    description: |
      The regular import workflow job to test with multiple cinder stores
    vars:
      devstack_localrc:
        USE_CINDER_FOR_GLANCE: True
        GLANCE_ENABLE_MULTIPLE_STORES: True
        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
        GLANCE_CINDER_DEFAULT_BACKEND: lvmdriver-1
        # Glance RBAC new defaults are tested by default so we need to test
        # old defaults in some job.
GLANCE_ENFORCE_SCOPE: false - job: name: glance-multistore-cinder-import-fips parent: tempest-integrated-storage-import description: | The regular import workflow job to test with multiple cinder stores with fips enabled nodeset: devstack-single-node-centos-9-stream pre-run: playbooks/enable-fips.yaml vars: configure_swap_size: 4096 nslookup_target: 'opendev.org' devstack_local_conf: post-config: $GLANCE_API_CONF: wsgi: python_interpreter: $PYTHON devstack_localrc: USE_CINDER_FOR_GLANCE: True GLANCE_ENABLE_MULTIPLE_STORES: True CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2 GLANCE_CINDER_DEFAULT_BACKEND: lvmdriver-1 GLOBAL_VENV: false - job: name: tempest-integrated-storage-import-standalone parent: tempest-integrated-storage-import description: | The regular tempest-integrated-storage-import-workflow job but with glance in standalone mode vars: devstack_services: g-api-r: false devstack_localrc: GLANCE_STANDALONE: True - job: name: glance-ceph-thin-provisioning parent: devstack-plugin-ceph-tempest-py3 description: | Just like devstack-plugin-ceph-tempest-py3, but with thin provisioning enabled required-projects: - name: openstack/glance_store vars: tempest_concurrency: 3 devstack_local_conf: post-config: $GLANCE_API_CONF: glance_store: rbd_thin_provisioning: True # TODO(pdeore): Remove this jobs once Cinder enable the RBAC new defaults # by default. All other services including glance enable new defaults by # default. - job: name: tempest-integrated-storage-enforce-scope-new-defaults parent: tempest-integrated-storage description: | This job runs the Tempest tests with scope and new defaults enabled Glance services. timeout: 10800 vars: tempest_concurrency: 3 devstack_localrc: # Nova and glance scope and new defaults are enabled by default in # Devstack so we do not need to explicitly set that to True. CINDER_ENFORCE_SCOPE: true # NOTE(gmann): Remove this job once Glance remove the RBAC old defaults. - job: name: tempest-integrated-storage-rbac-old-defaults parent: tempest-integrated-storage description: | This job runs the Tempest tests with glance RBAC old defaults. 
    timeout: 10800
    vars:
      tempest_concurrency: 3
      devstack_localrc:
        GLANCE_ENFORCE_SCOPE: false

- project:
    templates:
      - check-requirements
      - integrated-gate-storage
      - openstack-python3-jobs
      - openstack-python3-jobs-arm64
      - periodic-stable-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - openstack-tox-functional-py39
        - openstack-tox-functional-py310
        - openstack-tox-functional-py312
        - glance-ceph-thin-provisioning:
            voting: false
            irrelevant-files: &tempest-irrelevant-files
              - ^(test-|)requirements.txt$
              - ^.*\.rst$
              - ^api-ref/.*$
              - ^glance/hacking/.*$
              - ^glance/locale/.*$
              - ^glance/tests/.*$
              - ^doc/.*$
              - ^releasenotes/.*$
              - ^tools/.*$
              - ^tox.ini$
        - tempest-integrated-storage:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-enforce-scope-new-defaults:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-import:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-import-standalone:
            irrelevant-files: *tempest-irrelevant-files
        - glance-multistore-cinder-import:
            irrelevant-files: *tempest-irrelevant-files
        - grenade:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-ipv6-only:
            irrelevant-files: *tempest-irrelevant-files
        - nova-ceph-multistore:
            irrelevant-files: *tempest-irrelevant-files
        - glance-centralized-cache:
            voting: false
            irrelevant-files: *tempest-irrelevant-files
        - glance-grenade-centralized-cache:
            voting: false
            irrelevant-files: *tempest-irrelevant-files
        - glance-secure-rbac-protection-functional
    gate:
      jobs:
        - openstack-tox-functional-py39
        - openstack-tox-functional-py310
        - openstack-tox-functional-py312
        - tempest-integrated-storage:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-enforce-scope-new-defaults:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-import:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-import-standalone:
            irrelevant-files: *tempest-irrelevant-files
        - grenade:
            irrelevant-files: *tempest-irrelevant-files
        - grenade-skip-level:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-ipv6-only:
            irrelevant-files: *tempest-irrelevant-files
        - nova-ceph-multistore:
            irrelevant-files: *tempest-irrelevant-files
        - glance-secure-rbac-protection-functional
    experimental:
      jobs:
        - glance-tox-py312-glance_store-tips
        - glance-tox-py311-glance_store-tips
        - glance-tox-functional-py312-glance_store-tips
        - glance-tox-functional-py311-glance_store-tips
        - barbican-tempest-plugin-simple-crypto
        - grenade-multinode
        - tempest-pg-full:
            irrelevant-files: *tempest-irrelevant-files
        - glance-multistore-cinder-import-fips:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-integrated-storage-rbac-old-defaults
    periodic:
      jobs:
        # NOTE(rosmaita): we only want the "tips" jobs to be run against
        # master, hence the 'branches' qualifiers below. Without them, when
        # a stable branch is cut, the tests would be run against the stable
        # branch as well, which is pointless because these libraries are
        # frozen (more or less) in the stable branches.
        #
        # The "tips" jobs can be removed from the stable branch .zuul.yaml
        # files if someone is so inclined, but that would require manual
        # maintenance, so we do not do it by default. Another option is
        # to define these jobs in the openstack/project-config repo.
        # That would make us less agile in adjusting these tests, so we
        # aren't doing that either.
        - glance-tox-functional-py312-oslo-tips:
            branches: master
        - glance-tox-functional-py311-oslo-tips:
            branches: master
        - glance-tox-py312-keystone-tips:
            branches: master
        - glance-tox-py311-keystone-tips:
            branches: master
        - glance-tox-functional-py312-keystone-tips:
            branches: master
        - glance-tox-functional-py311-keystone-tips:
            branches: master
        - glance-tox-py312-glance_store-tips:
            branches: master
        - glance-tox-py311-glance_store-tips:
            branches: master
        - glance-tox-functional-py312-glance_store-tips:
            branches: master
        - glance-tox-functional-py311-glance_store-tips:
            branches: master
        - glance-tox-py312-cursive-tips:
            branches: master
        - glance-tox-py311-cursive-tips:
            branches: master
        - glance-tox-functional-py312-cursive-tips:
            branches: master
        - glance-tox-functional-py311-cursive-tips:
            branches: master
        - glance-multistore-cinder-import-fips:
            branches: master
        - tempest-integrated-storage-rbac-old-defaults:
            branches: master

glance-29.0.0/AUTHORS
=====================

Aaron Rosen Abhijeet Malawade Abhishek Chanda Abhishek Kekane Abhishek Kekane Adam Gandelman Adam Gandelman Adam Spiers Ade Lee Adrien Cunin Ajaya Agrawal Akihiro Motoki Ala Rezmerita Alan Bishop Alberto Planas Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Gaynor Alex Meade Alexander Bashmakov Alexander Gordeev Alexander Maretskiy Alexander Tivelkov Alexandra Settle Alexandra Settle Alexei Kornienko Alexey Galkin Alexey Yelistratov Alfredo Moralejo Amala Basha AmalaBasha AmalaBasha Anastasia Vlaskina Andre Aranha Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Andrew Tranquada Andrey Brindeyev Andrii Ostapenko Andy McCrae Anita Kuno Ankit Arora Archit Sharma Arnaud Legendre Artur Svechnikov Arvind Nadendla Ashish Jain Ashwini Shukla Aswad Rangnekar Atsushi SAKAI Attila Fazekas Auktavian Garrett Avinash Prasad AvnishPal Balazs Gibizer Bartosz Fic Ben Nemec Ben Nemec Ben Roble Bernhard M. Wiedemann Bertrand Lallau Bertrand Lallau Bhagyashri Shewale Bhuvan Arumugam Bin Zhou Bo Wang Boris Pavlovic Brandon Palm Brant Knudson Brian Cline Brian D. Elliott Brian Elliott Brian Elliott Brian Lamar Brian Rosmaita Brian Rosmaita Brian Waldon Brianna Poulos Béla Vancsics Cao ShuFeng Cao Xuan Hoang Castulo J. Martinez Cerberus Chang Bo Guo ChangBo Guo(gcb) Charles Short Chen Fan Chmouel Boudjnah Chris Allnutt Chris Behrens Chris Buccella Chris Buccella Chris Dent Chris Fattarsi Chris St. Pierre Christian Berendt Christian Berendt Christian Berendt Christopher MacGown Chuck Short Chuck Short Cindy Pallares Clark Boylan Clint Byrum Corey Bryant Cory Benfield Cory Wright Cyril Roelandt DamonLi Dan Prince Dan Smith Dane Fichter Daniel Krook Daniel Pawlik Danny Al-Gaaf Danylo Vodopianov Darja Shakhray Darren White Davanum Srinivas Davanum Srinivas Dave Chen Dave McNally Dave Walker (Daviey) David Hill David Koo David Peraza David Rabel David Ripton David Sariel Dean Troyer DeepaJon Deepti Ramakrishna DennyZhang Derek Higgins Desmond Sponsor Dharini Chandrasekar Dina Belova Dinesh Bhor Dirk Mueller Dmitry Kulishenko Dmitry Tantsur Dolph Mathews Dominic Schlegel Donal Lafferty Doron Chen Doug Hellmann Doug Hellmann Dr. Jens Harbott Dr.
Jens Harbott Drew Varner Drew Varner Duncan McGreggor Durga Malleswari Varanasi Eddie Sheffield Edgar Magana Edward Hope-Morley Eldar Nugaev Elena Ezhova Elod Illes Eoghan Glynn Eric Brown Eric Harney Eric Windisch Erik Olof Gunnar Andersson Erno Kuvaja Erno Kuvaja Ethan Myers Eugeniya Kudryashova Ewan Mellor Fabian Wiesel Fabio M. Di Nitto Fei Long Wang Fei Long Wang Fengqian Gao Flaper Fesp Flavio Percoco Florent Flament Gabriel Hurley Gage Hugo Gary Kotton Gauvain Pocentek Geetika Batra GeetikaBatra GeetikaBatra George Peristerakis Georgy Okrokvertskhov Gerardo Porras Ghanshyam Ghanshyam Mann Ghanshyan Mann Gonéri Le Bouder Gorka Eguileor Graham Hayes Grant Murphy Gregory Haynes Grégoire Unbekandt Guillaume Espanel Guoqiang Ding Gábor Antal Ha Van Tu Haikel Guemar Haiwei Xu Han Guangyu Harsh Shah Harshada Mangesh Kakad He Yongli Hemanth Makkapati Hemanth Makkapati Hengqing Hu Henrique Truta Hervé Beraud Hirofumi Ichihara Hui Xiang Ian Cordasco Ian Cordasco Ian Wienand Iccha Sethi Igor A. Lukyanenkov Ihar Hrachyshka Ildiko Vancsa Ilya Pekelny Inessa Vasilevskaya Ionuț Arțăriși Isaku Yamahata Itisha Dewan J. Daniel Schmidt Jack Ding Jakub Ruzicka James Carey James E. Blair James Li James Morgan James Page James Polley Jamie Lennox Jamie Lennox Jared Culp Jasakov Artem Jason Koelker Jason Kölker Javeme Javier Pena Jay Pipes Jeffrey Zhang Jeremy Stanley Jesse Andrews Jesse J. Cook Jesse Pretorius Jia Dong Jin Li Jin Long Wang Jinwoo 'Joseph' Suh Joe Gordon Joe Gordon Johannes Erdfelt John Bresnahan John L. Villalovos John Lenihan John Warren Jon Bernard Jorge Niedbalski Joseph Suh Josh Durgin Josh Durgin Josh Kearney Joshua Harlow Joshua Harlow JuPing Juan Antonio Osorio Robles Juan Manuel Olle Juerg Haefliger Julia Varlamova Julien Danjou Jun Hong Li Justin Santa Barbara Justin Shepherd KATO Tomoyuki KIYOHIRO ADACHI Kamil Rykowski Karol Stepniewski Kasey Alusi Kashyap Chamarthy Ken Pepple Ken Thomas Kent Wang Kentaro Takeda Keshava Bharadwaj Kevin L. Mitchell Kevin_Zheng Khuong Luu Kirill Zaitsev Kui Shi Kun Huang Lakshmi N Sampath Lance Bragstad Lars Gellrich Leam Leandro I. Costantino Li Wei Liang Fang Lianhao Lu Lin Yang Liu Yuan Long Quan Sha Lorin Hochstein Louis Taylor Louis Taylor Lucian Petrut Luigi Toscano Luis A. Garcia Luong Anh Tuan Lyubov Kolesnikova M V P Nitesh Major Hayden Marc Abramowitz Mark J. Washenberger Mark J. Washenberger Mark McLoughlin Mark Washenberger Martin Kletzander Martin Kopec Martin Mágr Martin Tsvetanov Maru Newby Masashi Ozawa Matt Dietz Matt Fischer Matt Riedemann Matt Riedemann Matthew Booth Matthew Edmonds Matthew Thode Matthew Treinish Matthew Treinish Matthias Schmitz Maurice Leeflang Mauro S. M. 
Rodrigues Maxim Nestratov Mehdi Abaakouk Michael J Fork Michael Krotscheck Michael Still Michal Arbet Michal Dulko Mike Abrams Mike Bayer Mike Fedosin Mike Fedosin Mike Lundy Mike Turvey Mingda Sun Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Mitya_Eremeev Monty Taylor Mridula Joshi Munoz, Obed N NAO NISHIJIMA Naohiro Sameshima Nassim Babaci Neil Hanlon Ngo Quoc Cuong Nguyen Hai Nguyen Hung Phuong Nguyen Van Trung Niall Bunting Niall Bunting NiallBunting Nicholas Kuechler Nicolas Bock Nicolas Simonds Nikhil Komawar Nikhil Komawar Nikhil Komawar Nikolaj Starodubtsev Ning Yao Noboru Arai Noboru arai Nolwenn Cauchois Oleksii Chuprykov Olena Logvinova Ondřej Nový OpenStack Release Bot Pamela-Rose Virtucio Pankaj Mishra Patrick Mezard Paul Bourke Paul Bourke Paul McMillan Paul-Emile Element Pavan Kumar Sunkara Pavlo Shchelokovskyy Pawel Koniszewski Pawel Skowron Peng Yong Pengju Jiao Pete Zaitcev Pierre-Samuel Le Stang Pranali Deore Pranali Deore PranaliD PranaliDeore Preetika Pádraig Brady Pádraig Brady Qiaowei Ren Radosław Piliszek Radu Rafael Weingärtner Rainya Mosher Rajat Dhasmana Rajesh Tailor Ravi Shekhar Jethani Ray Chen Reynolds Chin Rick Bartra Rick Clark Rick Harris Robert Collins Rohan Kanade Roman Bogorodskiy Roman Bogorodskiy Roman Vasilets Ronald Bradford Rongze Zhu RongzeZhu Rui Yuan Dou Rui Zang Russell Bryant Russell Sim Russell Tweed Ryan Selden Sabari Kumar Murugesan Sachi King Sachin Patil Sam Morrison Sam Stavinoha Samuel Merritt Sascha Peilicke Sascha Peilicke Sathish Nagappan Scott McClymont Sean Dague Sean Dague Sean McGinnis Sean McGinnis Sean Mooney Sergey Nikitin Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shinya Kawabata Shuquan Huang Soren Hansen Stan Lagun Stephen Finucane Stephen Gordon Steve Kowalik Steve Lewis Stuart McLaren Sulochan Acharya Supalerk Jivorasetkul Svetlana Shturm Takashi Kajinami Takashi Kajinami Takashi Natsume Takeaki Matsumoto Taku Fukushima Tatyana Leontovich Tee Ngo Therese McHale Thierry Carrez Thomas Bechtold Thomas Bechtold Thomas Bechtold Thomas Leaman Tim Burke Tim Daly, Jr Timothy Symanczyk Toan Nguyen Tom Cocozzello Tom Hancock Tom Leaman Tomas Hancock Tomislav Sukser Tomoki Sekiyama Tony Breeds Travis Tripp Travis Tripp Unmesh Gurjar Unmesh Gurjar Vaibhav Bhatkar Venkatesh Sampath Venkatesh Sampath Victor Coutellier Victor Morales Victor Sergeyev Victor Stinner Vincent Untz Vishvananda Ishaya Vitaliy Kolosov Vlad Gusev Vladislav Kuzmin Vyacheslav Vakhlyuev Waldemar Znoinski Wayne A. 
Walls Wayne Okuma Wen Cheng Ma WenjunWang1992 <10191230@zte.com.cn> Wu Wenxiang Xi Yang XiaBing Yao YAMAMOTO Takashi Yaguang Tang Yaguo Zhou Yandong Xuan Yanis Guenane Youngjun Yufang Zhang Yuiko Takada Yuriy Taraday Yusuke Ide ZHANG Hua Zane Bitter ZhengMa Zhenguo Niu Zhenguo Niu Zhi Yan Liu ZhiQiang Fan ZhiQiang Fan Zhiteng Huang ZhongShengping Zhongyue Luo abhishek-kekane afariasa amalaba anguoming ankitagrawal ankur annegentle april bbwang5827 bhagyashris bpankaj bria4010 caishan caoyuan chenaidong1 chenpengzi <1523688226@qq.com> chenwei chenxing daisy-ycguo dangming deepak_mourya dineshbhor eddie-sheffield eos2102 ericxiett frickler.admin fungi.admin gengjh ghanshyam ghanshyam mann haobing1 henriquetruta houming-wang huangtianhua hussainchachuliya hzrandd <82433422@qq.com> iccha iccha-sethi iccha.sethi imacdonn inspurericzhang isethi itisha jakedahn jare6412 jaypipes@gmail.com <> jiangpch jinxingfang jneeee jola-mirecka junboli kairat_kushaev ke-kuroki khashf kylin7-sg lawrancejing leo.young leseb liangjingtao lijunbo likui ling-yun lingyongxu liuqing liuxiaoyang liyingjun liyingjun lizheming llg8212 ls1175 makocchi makocchi-git manchandavishal marianitadn mathrock melissaml nanhai liao neha.pandey oorgeron pawnesh.kumar pengyuesheng pragathi93 pran1990 prithiv qiaomin raiesmh08 ravikumar-venkatesan renliang17 ricolin rsritesh rtmdk sai krishna sripada sarvesh-ranjan shaofeng_cheng shashi.kant shilpa.devharakar shreeduth-awasthi shrutiranade38 shu,xinxin songwenping space sridevik sridevik sudhir_agarwal tanlin taoguo ting.wang tmcpeak tobe venkatamahesh venkatamahesh vinay_m wangdequn wanghong wanghui wangqi wangxiyuan wangzihao weiweigu whoami-rajat wu.chunyang wu.shiming xuanyandong xurong00037997 yangxurong yatin yongiman yuqian yuyafei zengjia zhangbailin zhengwei6082 zhengyao1 zhiguo.li zhu.rong zhufl zwei Akhila 翟小君

glance-29.0.0/CONTRIBUTING.rst
==============================

The source repository for this project can be found at:

  https://opendev.org/openstack/glance

Pull requests submitted through GitHub are not monitored.
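As a rough sketch of the Gerrit-based flow (the quick-start guide linked
below has the authoritative steps; it assumes ``git-review`` is already
installed and configured)::

  $ git clone https://opendev.org/openstack/glance
  $ cd glance
  $ git checkout -b my-topic-branch
  $ # make your changes, then commit them
  $ git commit -a
  $ git review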
To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:

  https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on Launchpad:

  https://bugs.launchpad.net/glance

For more specific information about contributing to this repository, see the
glance contributor guide:

  https://docs.openstack.org/glance/latest/contributor/index.html

glance-29.0.0/ChangeLog
=======================

CHANGES
=======

29.0.0
------

* api-ref: add 'compressed' container_format
* Fixed typo in discovery.py
* Documentation: fix default value for rbd_store_chunk_size
* Remove unrecheable code
* Imported Translations from Zanata
* Add metadef value for stateless firmware
* Add new wait logic for functional test of add location API

29.0.0.0b3
----------

* Refresh Glance example configs for dalmatian m3
* Release notes for Dalmatian Milestone 3
* Add iso file format inspector
* Remove default override for RBAC config options
* Revert image state to queued if conversion fails
* Auto-detect qemu-img qed support for unit tests
* Make separate schema for new location API
* Remove location_strategy functionality
* Bump Images API version to 2.17
* Add api-ref and documentation for new location apis
* Add new get location api
* Add functional tests for new add-location API
* Add new add location api
* Add Location Import task flow
* Do not set_acls if store is not associated to glance node
* Imported Translations from Zanata
* Revert "Fix import job to provide valid disk-formats list to tempest"
* Fix /healthcheck url typo in glance-api-paste
* zuul: Drop openstack-tox-functional-py38-fips job

29.0.0.0b2
----------

* Add safety check and detection support to FI tool
* Fix: optimized upload volume in Cinder store
* Revert "Make import jobs non-voting"
* Add releasenote for CVE-2024-32498 fix
* Increase timeout for tempest-integrated-storage-import job
* Make import jobs non-voting
* Add file format detection to format_inspector
* Add QED format detection to format_inspector
* Reject unsafe qcow and vmdk files
* Add VMDK safety check
* Extend format_inspector for QCOW safety
* Reject qcow files with data-file attributes
* Fix import job to provide valid disk-formats list to tempest
* Deprecate the "metadata_encryption_key" option
* Release notes for Dalmatian Milestone 2
* Make location URL compatible with cinder backend
* Drop sqlparse
* docs: Correct grammar nit
* Fix 500 if multi-tenant swift is used
* Move all periodic tips jobs from py38/39 to py311/312
* Make functional-py312 voting and add it to the gate
* Imported Translations from Zanata
* Add non-voting functional-py312 job

29.0.0.0b1
----------

* Refresh Glance example configs for dalmatian m1
* Release notes for Dalmatian Milestone 1
* Fix broken glance-cache-prefetcher utility
* Stop using deprecated defusedxml.cElementTree
* Remove sendfile support
* reno: Update master for unmaintained/zed
* Remove glossary
* Remove allow_additional_image_properties
* Replace remaining usage of [DEFAULT] sql_connection
* Change DB migration constant to 2024_2
* Architecture docs: Fix list indentation
* Remove unused run_sql_cmd
* Remove SQLAlchemy tips jobs
* doc: remove trailing dot from hypervisor_type list
* Cinder: Remove ambiguous warning
* Docs: be more explicit regarding oslo_limit.endpoint_id
* doc: Remove
  non-existing auth parameters of glance-cache.conf
* Remove the digest_algorithm option
* Update master for stable/2024.1
* refectory: fix comments

28.0.1
------

* Fix glance-api if cache is disabled
* reno: Update master for xena Unmaintained status
* reno: Update master for wallaby Unmaintained status
* reno: Update master for victoria Unmaintained status
* Docs: Add info about the cinder store
* Refresh Glance example configs for caracal m3
* Release notes for Caracal Milestone 3
* New grenade job to upgrding cache driver
* Require more specific exception
* Make `centralized_db` cache driver default
* Use centralized_db cache driver in tempest jobs
* Set a lock_path for tests
* Fix flaky test related to cache migration
* [docs] Configure centralized_db cache driver
* Prepare for castellan 4.4.0
* Migrate from SQLite to Centralized db
* [minor] Fix doc string for class
* Deprecate glance scrubber
* Make openstack-tox-functional-py38-fips job non-voting
* Move sqlite code in common module
* Read global config file for cache utilities
* Introduce centralized database driver for image cache
* Deprecate the "location_strategy" option
* Add required database API(s) for cache
* inject_image_metadata plugin: Fix documentation
* reno: Update master for yoga Unmaintained status
* Add new tables for cache operations
* Use constants for common rules
* Support Stream Optimized VMDKs
* Drop ineffective options from config file
* Replace CRLF by LF
* Bump hacking
* Replace usage of deprecated [DATABASE] sql_connection
* Deprecate the "glance-cache-manage" command
* Deprecate sqlite cache driver
* Fix test failures with oslo.limit 2.3.0
* Update python classifier in setup.cfg

28.0.0.0b2
----------

* Add Packed Virtqueue extra spec and image properties
* Increase timeout for glance cinder multistore job
* Deprecate Windows OS support
* Revert "Make glance-tox-functional-py39-sqlalchemy-tips job non-voting"
* Make glance-tox-functional-py39-sqlalchemy-tips job non-voting
* Drop unused pyOpenSSL
* Do not load api-paste.ini using oslo.config
* Unset VENV python_interpreter in fips job
* zuul: Replace use of deprecated regex syntax
* Imported Translations from Zanata
* Update master for stable/2023.2

27.0.0
------

* api-ref: Fix RST formatting
* Remove ubuntu focal job
* Reduce concurrency for tempest jobs
* Refresh Glance example configs for bobcat rc1
* Imported Translations from Zanata
* Fix openstack-tox-py311 job
* Set GLOBAL_VENV to false for centos
* Fix post failures causing by image conversion failure
* Sort locations based on store weight
* db: Use context, session args for metadef DB APIs
* db: Trivial parameter swap
* Add job to test with SQLAlchemy master (2.x)
* db: Remove explicit autocommit
* db: Replace use of update.values parameter
* db: Sync migrations, models
* db: Drop the convert_unicode, unicode_error flags
* db: Don't inherit cache for DeleteFromSelect
* db: Re-use a single connection
* db: Drop use of implicit auto-commit
* db: Enable foreign keys for SQLite backend
* db: Replace dict-style access of Row attributes
* db: Don't pass strings to Connection.execute()
* db: Replace use of aliased keyword arguments
* db: Pass case.whens as positionals, not a list
* db: Replace use of 'Query.values()'
* db: Don't invoke and_() without arguments
* db: Enable batch migrations by default
* db: Disable logging by default for alembic
* db: Remove use of 'bind' arguments
* db: Replace use of legacy select() calling style
* tests: Add tests for loading, unloading metadefs
* tests:
  Enable SQLAlchemy 2.0 deprecation warnings
* db: Avoid import variable shadowing
* db: Update import location of declarative_base
* tox: Unset basepython
* tests: Use WarningsFixture in all tests
* tests: Restore - don't reset - warning filters
* Remove 'egg_info' configuration
* Imported Translations from Zanata
* Clean generated source RST files
* Correct item numbering in install docs

27.0.0.0b2
----------

* Release notes for Bobcat Milestone 2
* Refresh Glance example configs for bobcat milestone 2
* Remove the last occurrence of six.add_metaclass
* Doc: remove "stores" and "default_store" options
* Doc: Factorise installation instructions
* Imported Translations from Zanata
* Handle case for multiple cirros images
* Test glance RBAC old defaults
* Run integrated test if zuul.yaml file is changed

27.0.0.0b1
----------

* Bump eventlet to a version that fixes #632[1]
* Imported Translations from Zanata
* Refactor pipeline definition
* Release notes for Bobcat Milestone 1
* Refresh Glance example configs for bobcat milestone 1
* Add rocky linux to useful image properties os_images list
* Deploy healthcheck middleware as app instead of filter
* Update functional jobs for 2023.2
* Fix functional test failures with PasteDeploy 3.x
* Remove duplicate value in compute-host-capabilities.json
* Change DB migration constant to 2023_2
* Add a check on startup for staging directory
* Imported Translations from Zanata
* Update master for stable/2023.1

26.0.0
------

* Revert "Make glance-secure-rbac-protection-functional job non-voting"
* Make glance-secure-rbac-protection-functional job non-voting
* Enclose all APIv2 versions in single quotes

26.0.0.0b3
----------

* Release notes for Antelope Milestone 3
* Refresh Glance example configs for antelope milestone 3
* Remove deprecated ``enforce_secure_rbac`` option
* Enabled new defaults and scope checks by default
* doc/useful-image-properties: add missing `` to close a code block
* Add multihash info in glance documentation
* Limit CaptureRegion sizes in format_inspector for VMDK and VHDX
* Allow easier admin override in policies
* Update migration constant
* Remove migration constant job and test
* Fix E741 issues

26.0.0.0b2
----------

* Release notes for Antelope Milestone 2
* Fix cinder tests for refactoring effort
* Refresh Glance example configs for antelope milestone 2
* Fix tox4 error
* Enforce image safety during image_conversion
* Update the RADOS authentication link
* Drop tag assertion from README
* docs: Add note about introduction of per-tenant quotas
* Remove useless test for the CooperativeReader class
* vmware does't support VirtualSriovEthernetCard
* Further robustification of format_inspector
* Fix unintentional exception inspecting VMDK
* Imported Translations from Zanata
* Fix a document description error
* add openstack-python3-zed-jobs-arm64 job
* Quota configuration: improve example oslo_limit section
* Imported Translations from Zanata
* Imported Translations from Zanata
* Switch to 2023.1 Python3 unit tests and generic template name
* Update master for stable/zed
* Fix a wrong assertion method

25.0.0
------

* Imported Translations from Zanata
* Fixes the api-ref response
* Imported Translations from Zanata

25.0.0.0b3
----------

* RBAC updates: drop system scope
* Minor fix on Interoperable Image Import admin doc
* Refresh Glance example configs for zed milestone 3
* Remove 'glance-download' from default import methods
* [glance-download] Add missing config options to config list
* Add documentation for
  glance-download plugin
* Imported Translations from Zanata
* Release notes for Zed Milestone 3
* Implement glance-download internal plugin
* Extending stores-detail API
* Add support to get glance endpoint
* Fix a deprecation warning about threading.Thread
* remove expired notes for Multi Store feature
* Mock getaddrinfo in cinder store tests
* Imported Translations from Zanata
* Remove unused pylintrc
* Update exception for property protection file
* Imported Translations from Zanata
* Enable Image cache tempest testing
* glance-manage: fix crash under Python3.11
* Immediate caching of an image

25.0.0.0b2
----------

* Remove secure_proxy_ssl_header opt from Glance
* Release notes for Zed Milestone 2
* Refresh Glance example configs for Zed milestone 2
* Include os-brick configuration options
* Doc: fix URL in API call
* Fix URLs in the form docs.openstack.org/developer/$project
* Add missing oslo_limit options
* Adds purge command to glancemanage man page
* doc: update the kvm kw_vif_model driver list
* Remove unicode literal strings
* Imported Translations from Zanata
* [APIImpact] Correct API response code for DELETE cache APIs
* glance-cache-prefetcher: Set up access to multistore backends
* Remove options for oslo.db thread pool support
* Fix fips job
* Bindep fixes for SUSE-like systems
* Remove dead code of auth and policy layers
* Update api-ref: versions response for master branch
* Modify wrong file path in README
* Doc: Glance group management
* Imported Translations from Zanata
* Bump Image API version to 2.16
* Remove workaround for python_exec cannot be None
* Revert "Disable import workflow in glance cinder jobs"
* Add openstack-tox-functional-py38-fips job
* Update python testing as per zed cycle teting runtime
* Correction in zed milestone 1 releasenotes
* Revert "Override GLANCE_USE_IMPORT_WORKFLOW in cinder jobs"
* Add CPU Mode Metadata Def
* Add flavor and image options to select emulated virtual tpm
* Release notes for Zed Milestone 1
* Use show-policy to render default policy in documentation
* Refresh Glance example configs for Zed milestone 1
* Migrate CentOS Stream 8 job to CentOS Stream 9
* doc: add aarch64 to list of architecture image properties
* Fix failing namespace list delete race
* Override GLANCE_USE_IMPORT_WORKFLOW in cinder jobs
* Delay string interpolations at logging calls
* Disable import workflow in glance cinder jobs
* Added cli_opts and cache_opts
* fixed typo in multistore documentation
* [APIImpact] Correct API response code for PUT /v2/cache/{image_id}
* [CI] Add upper constraints to install command
* glance-manage can purge all deleted rows
* [api-ref] update cache manage API info
* Internal server error if shared member tries to stage data to image
* Update migration constant
* Imported Translations from Zanata
* api-ref needs update about 'checksum' image prop
* Move FIPS jobs to experimental and periodic queue
* Update doc8 ignore-path
* Update config doc for cinder backend
* Remove Babel requirement
* Fix warning in the release notes
* Update master for stable/yoga

24.0.0
------

* Add grenade-skip-level irrelevant-files config
* Change default value for [wsgi]/python_interpreter
* Sync example configs for Yoga
* Yoga RC-1 release notes
* Added a new API to expose store info
* Fix overwriting of existing tags while creating new tags
* Cache management API endpoints
* Make FIPS job non-voting
* Update migration constant
* Add Python 3 only classifier
* Add fips check jobs
* [APIImpact] Quota usage API
* Fix
  set_property_atomic() boolean type casting
* tests: Remove check for os.set_inheritable
* doc: Remove info on running under Python 3
* Remove '__unicode__' magic methods
* Remove glance.common.client._encode_headers
* Remove final six usage
* Remove six.assertRaisesRegex usage
* Remove remaining six.moves usage
* Remove six.moves.urllib usage
* Remove six.moves.http_client usage
* Remove six.moves.range usage
* Replace six.BytesIO, six.StringIO usage
* Remove six.text_type, six.binary_type usage
* Document new properties used by Ironic
* Fix tests on Python3.10
* Bump SQLalchemy to version that supports PY3.9
* Use LOG.warning instead of deprecated LOG.warn
* Remove six.iteritems usage
* Remove six.add_metaclass usage
* Updating python testing as per Yoga testing runtime
* Fix tests for logging connection info
* Replace FakeObject with mock.MagicMock
* Imported Translations from Zanata
* Refactor glance cinder tests
* Add "bochs" as a possible video model
* Update Interop doc
* Add missing packages to test dependencies
* Remove broken tempest-full-py3-opensuse15 job
* Update qcow test to use qcow2 instead
* Fix typos
* Imported Translations from Zanata
* Use singular they instead of "he/she" where possible
* Remove dead psutil-related code
* Add libpcre3-dev/pcre-devel to bindep.txt
* Use single quotes in image policies
* Bump oslo.policy to version 3.8.1
* Drop support for SQLite < 3.7
* Bump oslo.log to version 4.5.0
* Add Python3 yoga unit tests
* Update master for stable/xena

23.0.0.0rc1
-----------

* Xena RC-1 release notes
* Remove duplicate _header() implementations in metadef tests
* Remove duplicate _url() methods from functional test classes
* Move global constants to test module setUp
* Remove duplicate namespace utilities in functional tests
* Fixed image_format typo in doc
* Fix failed cinder store migration for non-owners
* Reproduce bug #1932337
* Make signature verification go back to 'queued'
* [uwsgi] Add missing pefetch periodic job

23.0.0.0b3
----------

* Xena milestone 3 release notes
* Refresh Glance example configs for Xena milestone 3
* Add doc support for delete-from-store API
* Implement project personas for metadef tags
* Implement project personas for metadef properties
* Implement project personas for metadef resource-types
* Implement project personas for metadef objects
* Implement project personas for metadef namespaces
* 'community' images need to be treated as public
* Check policies for Image Cache in API
* Move metadef tag policy checks in the API
* Move metadef property policy checks in the API
* Check policies for image tasks information in API
* Add release note about policy-refactor
* Check policies for Image Tags in API
* Check policies for image import operation in API
* Avoid reinitialization of image repo
* Move Tasks policy checks in the API
* Check policies for delete image for store in API
* Check deactivate, reactivate policy in the API
* add doc about KVM support e1000e vif model
* Add missing [oslo_reports] options
* Move metadef resource type association policy checks in the API
* Check add_image policy in the API
* Add missing forbidden to not found case for GET namespace API
* trivial: Double quote check_str for consistency
* Check policies for staging operation in API
* Check download_image policy in the API
* Refactor gateway auth layer for image factory
* Suppress policy deprecation and default change warnings
* Check upload_image policy in the API
* Load options from the castellan library
* Resolve compatibility
  with oslo.db future (redux)
* Add missing parameters for the healthcheck middleware
* Deprecate task specific policies
* Refactor gateway auth layer for task APIs
* Move member policy checks to API layer
* setup.cfg: Fix errant key
* Resolve compatibility with oslo.db future
* Move metadef object policy checks in the API
* Move metadef namespace policy checks in the API
* Refactor gateway auth layer for member APIs
* Check delete_image policy in the API
* Add check_is_image_mutable() legacy helper
* Fix: glance cinder functional test
* Refactor gateway auth layer for metadef APIs
* Fix failing copy_image flow init
* Add a member field to Image when appropriate
* Check get_image(s) in the API
* Make image update check policy at API layer
* Refactor gateway get_repo auth layer
* Remove SSL configuration section from docs
* Clean out deprecations from paste.ini
* tests: Remove use of 'oslo_db.sqlalchemy.test_base'
* Add api_patch() to SynchronousAPIBase
* Remove dead 403->404 code
* Replace deprecated assertDictContainsSubset
* Get rid of deprecated xml.etree.cElementTree
* Add base policy check module
* DB layer suppress Forbidden to NotFound error
* Add functional tests for metadef resource types
* Move lazy store update to locations layer
* Make property protection tests use member role
* Make our functional tests compatible with RBAC
* Change database migration version to xena
* db: Remove "support" for configurable migration backends
* trivial: Remove references to sqlalchemy-migrate
* db: Remove old sqlalchemy-migrate migrations
* db: Stop checking for DB under sqlalchemy-migrate control
* db: Replace sqlalchemy-migrate call with alembic equivalent
* db: Move 'schema' module up a level

23.0.0.0b2
----------

* Xena milestone 2 release notes
* Refresh Glance example configs for Xena milestone 2
* Fix oslo policy DeprecatedRule warnings
* Fix the policy deprecation message
* Add unified quotas documentation
* Add image_count_uploading quota enforcement
* Add user_get_uploading_count() to DB API
* Add a nonvoting functional job with RBAC defaults
* Use default policies in our tests
* Add image_count_total quota enforcement
* Add user_get_image_count() to DB API
* Add image_stage_total quota enforcement
* Add user_get_staging_usage() to DB API
* Enforce keystone limits for image upload
* Fix broken test_update_queued_image_with_hidden
* Document domain-related cinder backend options
* Add unified quotas infrastructure
* Update image.size after conversion
* Drop lower-constraints jobs
* Make image stage set image.size
* Refactor SynchronousAPIBase for more cases
* Revert "Remove all usage of keystoneclient"
* Fix missing context args to get_flow()
* Changed minversion in tox to 3.18.0
* Make taskflow_executor log get_flow() exceptions
* Remove references to sys.version_info
* [Doc] Update Freenode to OFTC as our IRC server
* Make project_id a formal target alias
* Make wsgi tests use noauth deployment flavor
* Fix test_cache_middleware tests to use auth
* Fix auth info on scrubber tests
* Allow member creation when using db.simple api
* setup.cfg: Replace dashes with underscores
* Cinder Store: Update legacy images tests
* Ignore stale image property removal
* Fix image/tasks API for in-progress tasks
* Guidelines for core reviewers
* Imported Translations from Zanata
* Add Python3 xena unit tests
* Update master for stable/wallaby

22.0.0
------

* Fix a typo in contributor docs
* Make some metadef operations admin-only

22.0.0.0b3
----------

* Add a release
  note for secure RBAC personas
* Wallaby milestone 3 release notes
* Fix erroneous exit from copy wait loop
* Update the task policies
* trivial: remove unnecessary grouping in base policies
* trivial: Fix minor grammatical issues in cache middleware
* Refresh Glance example configs for Wallaby milestone 3
* Implement project personas for image actions
* Fix test_cache_middleware ImageStub
* Move setting of enforce_scope to devstack side
* Add glance functional protection tests to check and gate
* Enable second glance worker for import testing
* Add housekeeping module and staging cleaner
* Add administrator docs for distributed-import
* Distributed image import
* Make functional tests set node_staging_uri
* Fail to start if authorization and policy is misconfigured
* Add get_ksa_client() helper
* Add a test for migration naming and phase rules
* Bump Images API version to 2.12
* Update API docs for new /v2/images/{image_id}/tasks API
* New API /v2/images/{id}/tasks
* Utilize newly added tasks database fields
* Extract req.context in a variable
* Expand tasks database table to add more columns
* Pass oslo.context RequestContext objects directly to policy enforcement
* Properly handle InvalidScope exceptions
* Add basic/common personas to base policies
* Bump requirements to prepare for secure RBAC
* Stop raising 403 when image is not found
* Fix: cinder store test
* Make glance cinder multistore job voting
* Cleanup remaining tenant terminology in glance API docs
* trivial: Fix a typo in devstack plugin.sh
* Remove unused option "owner_is_tenant"
* Uncap PrettyTable
* Make copy_image plugin use action wrapper
* Make inject_image_metadata use action wrapper
* Fix nonsensical test mocks and assertions
* Allow plugins to mutate image extra_properties
* Make image_conversion use action wrapper
* Add missing fail case tests for image_conversion
* Make action wrapper support arbitrary properties
* Make web-download revert all stores on fail
* Pass ImageActionWrapper to internal plugins
* Add functional tests for cinder multiple store
* Add devstack plugin script
* Add policy sample file in doc
* Replace collections.Iterable
* [goal] Deprecate the JSON formatted policy file
* Exclude os_glance namespace from property quota
* Make os_glance namespace reserved
* Change database migration version to wallaby
* Update docs and renos for os_glance reservation

22.0.0.0b2
----------

* Fix upgrade checks for sheepdog
* Tox.ini: add py39
* Fix a typo in functional helper class
* Move some helper functions to base class
* Update version of doc8
* Imported Translations from Zanata
* [Doc] Remove description about v1 api and glance-registry
* Replace md5 with oslo version
* Bump lower_constraints and requirements
* Run nova-ceph-multistore only when tempest is ran
* Imported Translations from Zanata
* Remove 'admin_role' option
* Adding gate job for glance cinder store
* Imported Translations from Zanata
* Bump Images API version to 2.11
* Adjust jobs for devstack WSGI mode default
* Add Python3 wallaby unit tests
* Update master for stable/victoria

21.0.0.0rc1
-----------

* Victoria RC-1 release notes
* Fix cleaning of web-download image import
* Do not use OSC in infra playbook
* Image import "web-download" check downloaded size
* docs: Remove cruft from 'conf.py'
* docs: Convert table of image properties to definition list
* docs: Remove references to XenAPI driver

21.0.0.0b2
----------

* Victoria milestone 3 release notes
* Run the nova-ceph-multistore job against glance
* Corrections
  in default value of all_stores_must_succeed
* [Docs] Cinder multiple stores for glance
* Support cinder multiple stores
* Remove babel.cfg etc
* Refresh Glance example configs for Victoria milestone 3
* Add a release note about import locking
* [Trivial]Add missing print parameters in log messages
* Make our import-workflow job also convert images to raw
* Disable wait_for_fork() kill aggression if expect_exit=True
* Make our ceph job enable thin provisioning
* Cleanup import status information after busting a lock
* Add ImageLock to base flow checks
* Functional test enhancement for lock busting
* Handle atomic image properties separately
* Move SynchronousAPIBase to a generalized location
* Add functional test for task status updating
* Implement time-limited import locking
* Add FakeData generator test utility
* Make test_copy_image_revert_lifecycle handle 409 on import retry
* Poll for final state on test_copy_image_revert_lifecycle()
* Fix import failure status reporting when all_stores_must_succeed=True
* zuul: use the new barbican simple-crypto job
* Functional reproducer for bug 1891352
* Make wait_for_fork() more robust against infinite deadlock
* Update task message during import
* Heartbeat the actual work of the task
* Add image_delete_property_atomic() helper
* Flesh out FakeImage for extra_properties
* Add tests for _ImportToStore.execute()
* Add testing for _CompleteTask in api_image_import
* Fix non-deterministic copy_image_revert_lifecycle test
* Squelch the stevedore.extension debug logging in functional tests
* Use correct import order in test_api_import_image
* Inspect upload/import stream and set virtual_size
* Stream-friendly disk format inspection module
* Fix active image without data
* Fix active image when all uploads fail
* [goal] Migrate glance jobs to focal
* Add "stores" to disallowed properties
* Change database migration version to victoria

21.0.0.0b1
----------

* Refresh Glance example configs for Victoria milestone 2
* Add new config options in sample config generator
* Victoria milestone 2 release notes
* Make wsgi_app support graceful shutdown
* Make image conversion use a proper python interpreter for prlimit
* [Doc] Policy support to copy unowned images
* Make glance-api able to do async tasks in WSGI mode
* Fix release note formatting
* Fix broken glance-cache-manage utility
* Make our tempest job use import, standalone, and inject_metadata
* Fix admin docs deplying under HTTPD
* Add image_set_property_atomic() helper
* Remove deprecated glance-replicator options
* Remove unused "copy_from" policy rule
* Add a policy knob for allowing non-owned image copying
* Update sample configs post deprecation removals
* Cleanup remove api v1 and registry code
* Don't use Stevedore 3.0.0 which breaks gate
* Make import task capable of running as admin on behalf of user
* Add context.elevated() helper for getting admin privileges
* Refactor TaskFactory and Executor to take an admin ImageRepo
* Add a functional test for non-owned image copying
* Refactor common auth token code in images test
* Fix metadefs for compute-watchdog
* Drop collections.abc compat handling
* Don't include plugins on 'copy-image' import
* Add 'all' visibility filter for listing images in docs
* Improve lazy loading mechanism for multiple stores
* Removal of 'enable_v2_api'
* Update lower-constraints versions
* Add a test to replicate the owner-required behavior of copy-image
* Deprecation cleanout Registry and related
* Check authorization before
  import for image
* Make test-setup.sh compatible with mysql8
* Fix: Interrupted copy-image leaking data on subsequent operation
* Switch from unittest2 compat methods to Python 3.x methods
* Imported Translations from Zanata
* Use unittest.mock instead of mock
* Remove configs and entries for deprecated registry
* Exclude http store if --all-stores specified for import/copy operation
* Use grenade-multinode instead of the custom legacy job
* Stop to use the __future__ module
* Switch to newer openstackdocstheme and reno versions
* Imported Translations from Zanata
* Cap jsonschema 3.2.0 as the minimal version
* Add tests to lower-constraints job
* Fix hacking min version to 3.0.1
* Imported Translations from Zanata
* Use py38 instead of py37 jobs
* Switch to new grenade job name
* Fix a failure to parse json file
* Bump default tox env from py37 to py38
* fix typo in gerrit doc
* Imported Translations from Zanata
* Add py38 package metadata
* Imported Translations from Zanata
* Add Python3 victoria unit tests
* Update master for stable/ussuri

20.0.0.0rc1
-----------

* Use unittest.mock instead of third party mock
* Skip 'test_image_member_lifecycle_for_multiple_stores' on failure
* Imported Translations from Zanata
* Add warning and note on image schema customization docs

20.0.0.0b3
----------

* Install all deps in venv creation
* Ussuri final release notes
* Refresh Glance example configs for Ussuri milestone 3
* Deprecate admin_role
* Imported Translations from Zanata
* Fix multiple image imports if boolean input passed as string
* Add missing Image property hw_vif_multiqueue_enabled to metadefs
* Update OS::Glance::CommonImageProperties in metadefs
* Update 'common image properties' doc
* Add description of how to use S3 driver
* Deprecate the 'checksum' image property
* Do not decompress 'compressed' containers
* Revise admin interoperable image import docs
* Update uWSGI doc definition names
* Add Policy enforcement for several Metadata Definition delete APIs
* Remove all references to sheepdog
* Update hacking and reenable local checks
* Monkey patch original current_thread _active
* Add deprecation message to policy "default" rule
* Cleanup old cruft
* Raise hacking to latest 2.0.0 release
* Add decompression import plugin
* Deprecate allow_additional_image_properties
* Remove usages of SSL related config options in utils
* Add possibility to delete image from single store
* Community Goal: Project PTL & Contrib Docs Update
* Remove policy.json from setup.cfg
* Reminder: show_multiple_locations still deprecated
* Correct reference to 2 import methods
* Document os_admin_user in Useful Image Properties
* Multiple import fails if "all_stores" specified as "true"
* Use devstack ceph plugin python3 job
* Amend tempest-irrelevant-files to avoid false positives
* Revert "Remove all example configs for deprecated registry"
* Remove all example configs for deprecated registry

20.0.0.0b2
----------

* Refresh Glance example configs for Ussuri milestone 2
* Unit tests fails with oslo.config 7.0
* Copy existing image in multiple stores
* Add ability to import image into multi-stores
* Use application_url in API version document
* Configure Glance TCP socket after the store initialization
* Ensure store ID parsed from URI is properly encoded
* Use default cors config also when run as wsgi app
* Remove empty policy.json
* Fix string interpolation to delay by logging
* Move policy defaults into code
* Imported Translations from Zanata
* Change database migration version to
ussuri * Staging area not cleared if image is deleted while importing * doc: Clean up unnecessary left vertical lines 20.0.0.0b1 ---------- * Refresh Glance example configs for Ussuri milestone 1 * Remove registry related functional and unit tests * Remove py2 testing, jobs * Add classifiers of supporting py3 * Drop old neutron-grenade job * Remove native ssl support * Drop support for PY27 * Migrate grenade jobs to py3 * Imported Translations from Zanata * Fix old --public from install verify docs * Install requirements during the correct tox phase * Start README.rst with a better title * Use FakePolicyEnforcer for deserialization tests * Switch to opensuse-15 nodeset * Imported Translations from Zanata * Update master for stable/train 19.0.0.0rc1 ----------- * Set API version 2.9 CURRENT * Rethinking filesystem access * Refresh Glance example configs for Train * Use \`glance\` as example in docs instead of osc * Revert skipped functional tests * Add support for oslo.reports * Fix unit of hw\_rng:rate\_period * Revert "Add reserved stores to the sample config file" * Revert "Correct the deprecation messages of local dir config" * Fix DeprecationWarning: invalid escape sequence 19.0.0.0b1 ---------- * Add Train milestone 3 releasenotes * Remove OS::Compute::Trust metadef * Images API version bump and config sync * Correct the deprecation messages of local dir config * useful-image-properties: Update note on machine types * Add SEV-related extra spec and image properties * Add release notes for secret key deletion * Add periodic job to prefetch images into cache * Blacklist eventlet 0.23.0, 0.25.0 * Delete secret key on image deletion * fix properties' missing underline for VirtCPUTopology * Add disk format content from Image Guide * Release note for 'compressed' container format * Add 'compressed' option to container\_format * Fix INSTR not supported for postgresql * Adding member to image fails for multiple stores * MultiStore: Lazy update fails if image is not owned by owner * add metadef vlaues for new video models * add hw\_pmu metadef * Updating Ceph 404 URLs * doc: add link to release notes * Make location API compatible with multiple store * Lazy update stores information * Change location metadata key 'backend' to 'store' * Add reserved stores to the sample config file * Multiple backend support for scrubber * Add migration script to change backend to store * Skip tests to avoid trouble releasing store 1.0.0 * Update api-ref location * Image deletion returns 500 if 'file' store is not enabled * Run 'tempest-ipv6-only' job in gate * Replace "integrated-gate-py3" template with new "integrated-gate-storage" * Dropping the py35 testing * Update the glance installation page * Bump openstackdocstheme to 1.20.0 * Do not use glance\_store 0.29.0 * Remove glance-registry stuff * Modify the url of upper\_constraints\_file * Bring py27 tests back * MultiStore: Avoid potential KeyError in functional tests * Blacklist sphinx 2.1.0 (autodoc bug) * Add glance\_store tips to experimental jobs * Unit tests fails with new glance\_store version 0.29.0 * Update hw\_rng\_model image property doc * Remove additional " from config help * Update v2/info/stores api-ref * Mark http store read-only in discovery call * Update sphinx dependency * Don't target broken ssl tests to specific py3.minor * Added powervm as hypervisor type for image metadata prefiltering * Replace git.openstack.org URLs with opendev.org URLs * OpenDev Migration Patch * Pass kwargs to exception to get better format of error message * 
* Use YAML anchor to avoid repeating irrelevant files
* Switch to using stestr
* add libpq-dev to build psycopg2
* Uncap jsonschema
* Update CLI doc header for Stein
* Update cache-manage documentation
* Handle collections.abc deprecations
* Quiesce assertAlmostEquals deprecation warning
* Quiesce 'invalid escape sequence' deprecation msg
* Quiesce assertRaisesRegexp deprecation messages
* Periodic jobs are failing for python 3.5
* Windows: fix flaky tests
* Replace openstack.org git:// URLs with https://
* Imported Translations from Zanata
* Add more irrelevant-files for integrated test jobs
* Separate out configuration tests
* Update master for stable/stein

18.0.0
------

* Add irrelevant-files for integrated test jobs
* Add Stein releasenotes
* Stein example configs refresh
* Update irrelevant files
* Add description to common image properties
* Data remains in staging area if 'file' store is not enabled
* Allow glance tests to run on Windows
* Windows multiprocess wsgi
* glance Windows support
* Update requirement platform checks
* Migrate glance-dsvm-grenade-multinode job to Ubuntu Bionic
* Imported Translations from Zanata
* py3: Fix return type on CooperativeReader.read
* Clarify the Glance/metadefs relationship
* Document filesystem driver chunk size option
* Failure in web-download kept image in importing state
* Add job definition locations in comment
* Add cache-manage utility using v2 API
* Add new 'all' visibility filter for listing images
* Add an oslo.policy.enforcer entrypoint
* add python 3.7 unit test job
* Fix DeprecationWarnings for RequestContext.tenant/user usage
* Add test for data migration version
* Open Stein for data migrations

18.0.0.0b1
----------

* Update "Disallowed minor code changes" doc
* Correct typo in config option choices
* Use renamed template 'integrated-gate-py3'
* Make QuotaImageTagsProxy deep-copyable
* Update show\_multiple\_locations deprecation note
* Guard \_\_getattr\_\_ on QuotaImageTagsProxy
* Fix for FK constraint violation
* Image conversion fails
* Implement scaffolding for upgrade checks
* Drop dependency on monotonic
* Remove i18n.enable\_lazy() call from glance.cmd
* Imported Translations from Zanata
* Update mailinglist from dev to discuss
* Add definition for hw\_time\_hpet image property
* [doc] Fix options group for default\_backend
* Add missing ws separator between words
* Update http with https
* Embed validation data in locations
* Imported Translations from Zanata
* Make the link to release notes anonymous
* py3: fix recursion issue under py37
* Remove moxstubout usage
* Document os\_shutdown\_timeout image property
* Increment versioning with pbr instruction
* Fixed intermittent timeout/failing functional tests
* Use tempest-pg-full
* Refactor periodic "tips" jobs
* Do not use Accept.best\_match
* Replace openSUSE experimental check with newer version
* Do not use oslo.messaging 9.0.0
* fix a typo in docstring
* fix "it's" typos
* Imported Translations from Zanata
* Provision to add new config options in sample config file
* Cleanup .zuul.yaml
* add python 3.6 unit test job
* switch documentation job to new PTI
* Remove rootwrap.conf file from setup.cfg
* Imported Translations from Zanata
* import zuul job settings from project-config
* Revert "Add a default rootwrap.conf file."
* Add missing tests for 2.6 statuses
* Use WebOb 1.8.1
* Remove qpid-python from test-requirements
* Update Ubuntu install guide
* Remove stub\_out\_registry\_server method
* Update status to active when locations replaced
* Imported Translations from Zanata
* api-ref: "hidden" images update
* Remove broken bandit from testing
* Add multihash checks to functional tests
* Support RFC1738 quoted chars in passwords
* Imported Translations from Zanata
* Imported Translations from Zanata
* Update reno for stable/rocky
* Update README
* Update "Release Notes" in contributor docs
* Don't run ssl-handshake job on doc changes

17.0.0.0rc1
-----------

* Add prelude to Rocky releasenotes
* Introduce ''mock\_object'' method to base test class
* Add release note for multi-store feature
* Imported Translations from Zanata
* api-ref: update version history
* Handle StopIteration for Py3.7 PEP 0479
* Rename async package to async\_
* Add API version 2.8
* api-ref: make the discovery section more general
* Sync Rocky example configs
* Replace mox with mock
* Correct typo in help text
* Doc: Multiple stores support
* Bump Images API version to 2.7
* Unit/Functional tests for multi store support
* Add multi-store support
* Multihash implementation for Glance
* Document hw\_cpu\_policy and hw\_cpu\_thread\_policy image properties

17.0.0.0b3
----------

* Hide old images
* Use glance.context.RequestContext in tests
* Update glance documentation for trait support
* Remove Images API v1 entry points
* fix tox python3 overrides
* replace cmp with total\_ordering decorator
* Documentation for OSSN-0075 mitigation
* Change default age of purge\_images\_table to 180
* Mitigate OSSN-0075
* Prevent taskflow creation in impossible import
* useful-image-properties.rst: Update default RNG source
* Add image conversion plugin

17.0.0.0b2
----------

* Regenerate sample config files
* Update Release CPL doc about periodic jobs
* Use group\_regex to serialize scrubber tests
* Replace Chinese punctuation with English punctuation
* Remove deprecated 'enable\_image\_import' option
* convert windows line endings to unix format
* Add glance-eventlet-ssl-handshake-broken-py35 job
* Convert to string before using assertIn
* Add periodic tips jobs
* Add py35 statement
* Fix unreachable 'ImageSizeLimitExceeded' exception in image-upload
* Update pypi url to new url
* Refactor exception handling in cmd.api
* update "auth\_url" in document
* Fix wrong scheduler\_hints name in CIM::ProcessorAllocationSettingData
* Prevent early exit from functional tests
* Refactor wait\_for\_scrubber\_shutdown function
* Run scrubber functional tests in serial mode
* Add time-bounded wait-for-status to func tests
* Add 'useful image properties' document
* [api-ref] "Show images" should be changed to "List images"
* Follow the OpenStack Sphinx theme configuration
* Prepare for WebOb 1.8.1
* Add redirect capability to glance docs
* Modify the empty list ensure method
* Pending-delete rollback ability

17.0.0.0b1
----------

* Cleaning image data when image signature verification fails
* Imported Translations from Zanata
* Add info about building the v1 api-ref
* Follow the new PTI for document build
* Fix incompatible requirement in requirement.txt
* Migrate legacy job to project repository
* Remove all usage of keystoneclient
* Add fixture to limit unit test log output
* Refactor scrubber functional test
* Update auth\_uri option to www\_authenticate\_uri
* Use 'Default' as sample install domain name
* Update image schema with Image API 2.6 statuses
* Functional tests for new Image Import API
* Update auth\_url in install docs
* add lower-constraints job
* Fix web-download fails with default node\_staging\_uri
* Make functional tests run by default
* Imported Translations from Zanata
* Make eventlet monkey patching conform to best practices
* Python 3.5: Image Import fails with Unicode Error
* Updated from global requirements
* Change the outdated links to the latest links in README
* Update Glance landing pages
* Updated from global requirements
* Deprecate owner\_is\_tenant
* Updated from global requirements
* Imported Translations from Zanata
* Fix format in doc/source/cli/\*
* Clean the web-download tests
* Update Release CPL doc
* Use config opt value to determine import methods
* Delete the superfluous symbol of the command line
* Add barbican-tempest experimental job
* Open Rocky for data migrations
* Unit tests for Web-Download import method
* api-ref: correct typo
* api-ref: fix list-resource-type-assocs example
* Remove v1 from API ref
* Imported Translations from Zanata
* Remove v1 tests
* Imported Translations from Zanata
* Update installation guide: no uwsgi for glance
* Update some url links of rally/README.rst
* Hide Pike note on Queens Release Notes page
* Triggers shouldn't be executed in offline migration
* Revise help text for uri filtering options
* Revise database rolling upgrade documentation
* Migration support for postgresql
* Correct length limit for custom property value
* Use oslo\_db.sqlalchemy.test\_fixtures
* Fix config group not found error
* api-ref: update interoperable image import info
* Updated from global requirements
* Update Queens info about Glance and uWSGI
* Remove use of mox/mox3
* Imported Translations from Zanata
* Revise interoperable image import documentation
* Update admin docs for web-download import method
* URI filtering for web-download
* Add validation to check if E-M-C is already in sync
* Imported Translations from Zanata
* Cleanup basic import tasks
* Make the Image status transition early
* Use bool instead of int for boolean filter value
* image-guide: Update cirros image
* Limit default workers to 8
* Imported Translations from Zanata
* Fix format cache.rst
* Imported Translations from Zanata
* Use configured value for import-methods header
* Fix bad usage of extend in list\_image\_import\_opts
* Revert "Fix wrong usage of extend in list\_image\_import\_opts"
* Offline migration support for postgresql
* Fix wrong usage of extend in list\_image\_import\_opts
* Imported Translations from Zanata
* Update reno for stable/queens

16.0.0.0rc1
-----------

* Update Queens metadefs release note
* Update api-ref for v.2.6
* Add release note for API v2.6
* Align Vers Neg Middleware to current API
* Implementation of db check command
* Decouple Image Import Plugin Opts
* Revise import property injection plugin releasenote
* Correct 1-character typo
* Release note for Queens metadefs changes
* Regenerate sample configuration files
* Exiting with user friendly message and SystemExit()
* correct grammar, duplicate a found
* Modify glance manage db sync to use EMC
* Add img\_linked\_clone to compute vmware metadefs
* Handle TZ change in iso8601 >=0.1.12
* Replace xml with defusedxml
* Replace base functional tearDown with addCleanup
* Add functional test gates
* Skip one functional test
* Fix py27 eventlet issue <0.22.0
* Fix pip install failure
* Execute py35 functional tests under py35 environment
* Enable Image Import per default and make current
* Adds 'web-download' import method
* Updated from global requirements
* Skip one functional test
* Use addOnException to capture server logs on failure
* Separate out functional tests
* Update Signature Documentation
* Add doc8 to pep8 check for glance project
* Implementation of Inject metadata properties
* Resolve unit test failures when moving to oslo.serialization 2.3.0

16.0.0.0b3
----------

* Updated from global requirements
* Updated from global requirements
* Add documentation for image import plugins
* Update scrubber documentation
* [doc] Modify the description for the command
* Scrubber refactor
* Add hooks for Image Import plugins
* Updated from global requirements
* Fix 500 if custom property name is greater than 255
* Fix member create to handle unicode characters
* [import-tests] adds tests for image-import/staging
* Updated from global requirements
* [import-tests] Enhance image import tests
* Add fixture to only emit DeprecationWarning once
* Move 'upload\_image' policy check to the controller
* Fix 500 from duplicate stage call
* Updated from global requirements
* Prevent image becoming active without disk and container formats

16.0.0.0b2
----------

* Fix 500 on ValueError during image-import
* Update the documentation links
* Update the valid disk bus list for qemu and kvm hypervisors
* Add the list of hw\_version supported by vmware driver
* Updated from global requirements
* Utilize LimitingReader for staging data
* Fix 500 from image-import on 'active' image
* Fix 500 from stage call on non-existing image
* Fix unstage after staging store denies write
* Updated from global requirements
* Delete data if image is deleted after staging call
* Fix 500 from image-import on queued images
* Use new oslo.context arg names
* Use new oslo.db base test case
* Fix the wrong URL
* Correct related section for enable\_image\_import
* Fix SQLAlchemy reference link
* Remove setting of version/release from releasenotes
* Updated from global requirements
* Fix format of configuration/configuring.rst
* Removing unreachable line from stage() method
* Wrong description in ImageMembersController.update
* Updated from global requirements
* Updated from global requirements
* Correct sphinx syntax of glance doc
* Update http deploy docs to be a bit more explicit
* Clarify log message
* Updated from global requirements
* Document new URL format
* Update api-ref about 403 for image location changes

16.0.0.0b1
----------

* Make ImageTarget behave like a dictionary
* Document Glance Registry deprecation
* Replace body\_file with class to call uwsgi.chunked\_read()
* tests: replace .testr.conf with .stestr.conf
* Deprecate Registry and its config opts
* Update spec-lite info in contributors' docs
* Fix 500 if user passes name with more than 80 characters
* Remove use of deprecated optparse module
* Replace DbMigrationError with DBMigrationError
* Clean up api-ref index page
* Updated from global requirements
* Fix a typo in swift\_store\_utils.py: replace Vaid with Valid
* TrivialFix: Fix wrong test case
* Revert "Remove team:diverse-affiliation from tags"
* Update image statuses doc for latest change
* Update Rally Job related files
* Add default configuration files to data\_files
* Switch base to latest in link address
* Align default policy in code with the one in conf
* Fix missing some content of glance database creation
* Updated from global requirements
* Updated from global requirements
* Clean up database section of admin docs
* Add image import docs to admin guide
* Updated from global requirements
* Avoid restarting a child when terminating
* Open Queens for data migrations
* Change variable used by log message
* api-ref: add 'protected' query filter
* Update invalid links of User doc
* Separate module reference from contributor/index page
* Updated from global requirements
* Optimize the way to search file 'glance-api-paste.ini'
* Fix api\_image\_import tasks stuck in 'pending'
* Alembic should use oslo\_db facades
* Correct group name in config
* api-ref: add interoperable image import docs
* Add release note for Glance Pike RC-2
* Fix Image API 'versions' response
* Updated from global requirements
* Return 404 for import-info call
* Add 'tasks\_api\_access' policy
* Add 'api\_image\_import' type to task(s) schemas
* Update invalid path and link for Image Properties
* Fix 500 error from image-stage call
* Fix 500 error from image-import call
* Imported Translations from Zanata
* Update reno for stable/pike

15.0.0.0rc1
-----------

* Refresh config files for Pike RC-1
* Add release note for RC-1 including metadefs changes
* Updated from global requirements
* Add the missing i18n import
* Bump Images API to v2.6
* api-ref: update container\_format, disk\_format
* Update the documentation for doc migration
* Create image fails if 'enable\_image\_import' is set
* Updated from global requirements
* Add a default rootwrap.conf file

15.0.0.0b3
----------

* Updated from global requirements
* Fix typo in discovery API router
* Updated from global requirements
* Add release note for wsgi containerization
* Remove team:diverse-affiliation from tags
* Updated from global requirements
* Add Discovery stub for Image Import
* Update URL home-page in documents according to document migration
* Satisfy API Reference documentation deleting tags
* Fix glance image-download error
* Handle file delete races in image cache
* doc: Explicitly set 'builders' option
* Add 'protected' filter to image-list call
* Remove unused None from dict.get()
* Updated from global requirements
* Remove unused parameter from 'stop\_server' method
* use openstackdocstheme html context
* update doc URLs in the readme
* only show first heading from the glossary on home page
* move links to older install guides to the current install guide
* switch to openstackdocstheme
* Fix trust auth mechanism
* import the cli examples from the admin guide in openstack-manuals
* import troubleshooting section of admin guide from openstack-manuals
* import the installation guide from openstack-manuals
* import the glossary from openstack-manuals
* turn on warning-is-error for sphinx build
* Remove datastore\_name and datacenter\_path
* Clean up the redundant code
* Imported Translations from Zanata
* Add metadefs release note for Pike
* Updated from global requirements
* do not declare code blocks as json when they do not parse
* use :ref: instead of :doc: for xref
* add index page to cli dir
* fix image path
* fix include directives
* fix repeated hyperlink target names
* fix the autodoc instructions
* rearrange existing documentation to follow the new layout standard
* Make i18n log translation functions as no-op
* Remove unused variable
* Tests: Remove the redundant methods
* ignore generated sample config files
* Fix broken link to the "Image service property keys" doc
* Add docs and sample configs for running glance with apache
* Add pbr wsgi script entrypoint to glance
* Add external lock to image cache sqlite driver db init
* Updated from global requirements
* Remove use of config enforce\_type=True

15.0.0.0b2
----------

* Updated from global requirements
* Updated from global requirements
* Remove duplicate key from dictionary
* Updated from global requirements
* Stop enforcing translations on logs
* Remove usage of parameter enforce\_type
* Add import endpoint to initiate image import
* Add images//staging endpoint
* Addresses the comments from review 391441
* Fixed PY35 Jenkins Gate warnings
* Updated from global requirements
* Add a local bindep.txt override
* Add hide hypervisor id on guest host
* Updated from global requirements
* Updated from global requirements
* Clean up py35 env in tox.ini
* Trivial fix
* Fix periodic py27 oslo-with-master test
* Add OpenStack-image-import-methods header
* Updated from global requirements
* Fix wrong overridden value of config option client\_socket\_timeout
* Remove test\_unsupported\_default\_store
* Support new OSProfiler API
* WIP: Add api\_image\_import flow
* Change keystoneclient to keystoneauth1
* Clean up acceptable values for 'store\_type\_preference'
* Fix the mismatch of title and content
* Fix vmware option for glance\_store
* Add node\_staging\_uri and enable\_image\_import opts
* Fix doc generation for Python3
* Fix tests when CONF.set\_override with enforce\_type=True
* Updated from global requirements
* Document the duties of the Release CPL
* Dev Docs for Writing E-M-C Migrations
* Updated from global requirements

15.0.0.0b1
----------

* Fix and enable integration tests on py35
* Update api-ref for Range request support
* Do not serve partial img download reqs from cache
* Updated from global requirements
* Add release note for bug 1670409
* Accept Range requests and set appropriate response
* Provide user friendly message for FK failure
* Updated from global requirements
* Use cryptography instead of pycrypto
* Fix incompatibilities with WebOb 1.7
* Fix some reST field lists in docstrings
* Fix some reST field lists in docstrings
* Fix rendering of list elements
* Fix and enable two functional tests on py35
* Replace master/slave usage in replication
* Fix and enable remaining v1 tests on py35
* Fix and enable test\_cache\_middleware test on py35
* Invoke Monkey Patching for All Tests
* Update vmware metadef with ESX 6.5 supported OSes
* Remove the remaining code after glare-ectomy
* Invoke monkey\_patching early enough for eventlet 0.20.1
* correct "url" to "URL"
* Fix Unsupported Language Test
* Updated from global requirements
* Use HostAddressOpt for opts that accept IP and hostnames
* Fix experimental E-M-C migrations
* Update man pages to Pike version and release date
* Fix filter doesn't support non-ascii characters
* Remove glare leftovers from setup.cfg
* Fix api-ref with Sphinx 1.5
* Updated from global requirements
* Restore man pages source files
* Update test requirement
* Glare-ectomy
* Limit workers to 0 or 1 when using db.simple.api
* Updated from global requirements
* Restore Legacy Database Management doc
* Open Pike for data migrations
* Change identifiers in data migration tests
* [docs] Removing docs from dev ref
* Mock CURRENT\_RELEASE for migration unit test
* Fix up links to static content in sample-configuration
* Cleanup 'ResourceWarning: unclosed file' in py35
* Fix scrubber test failing py35 gate
* Update developer docs for rolling upgrades
* Updated from global requirements
* Fix brackets to suggest optionality
* Use https instead of http for git.openstack.org
* Updated from global requirements
* Prevent v1\_api from making requests to v2\_registry
* Prepare for using standard python tests
* Update reno for stable/ocata

14.0.0
------

* Refresh config files for Ocata RC-1
* Alembic migrations/rolling upgrades release note
* Add expand/migrate/contract migrations for CI
* Add expand/migrate/contract commands to glance-manage CLI
* Refactor tests to use Alembic to run migrations
* Port Glance Migrations to Alembic
* Handling scrubber's exit in non-daemon mode
* Correct 2.5 minor version bump release note
* Update api-ref for image visibility changes
* refactor glare plugin loader tests to not mock private methods of stevedore
* Refine migration query added with CI change
* Hack to support old and new stevedore
* do not mock private methods of objects from libraries
* Update deprecated show\_multiple\_locations helptext
* Add release note for image visibility changes
* Update api-ref for partial download requests
* Updated from global requirements

14.0.0.0b3
----------

* Eliminate reference to metadefs 'namespace\_id'
* Updated from global requirements
* Add image update tests for is\_public
* Fix regression introduced by Community Images
* Bump minor API version
* DB code refactor, simplify and clean-up
* Properly validate metadef objects
* Implement and Enable Community Images
* Fix NameError in metadef\_namespaces.py
* Update to "disallowed minor code changes"
* remove useless EVENTLET\_NO\_GREENDNS
* Updated from global requirements
* Adjust test suite for new psutil versions
* Update dev docs to include 'vhdx' disk format
* Change SafeConfigParser into ConfigParser
* Image signature documentation modify key manager api class
* Log at error when we intend to reraise the exception
* Remove obsolete swift links
* Updated from global requirements
* Add ploop to supported disk\_formats
* Updated from global requirements
* Fix some typos in api-ref
* Update sample config files for Ocata-3
* Enable python3.5 testing
* Update tox configuration file to reduce duplication
* Expand hypervisor\_type meta data with Virtuozzo hypervisor
* Remove v3 stub controller
* Updated from global requirements

14.0.0.0b2
----------

* Skipping tests for location 'add', 'replace' on 'queued' images
* Editing release note for location update patch
* Change cfg.set\_defaults into cors.set\_defaults
* Restrict location updates to active, queued images
* Allow purging of records less than 1 day old
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Python3: fix glance.tests.functional.v2.test\_images
* Python 3: fix glance.tests.functional.v1.test\_misc
* Python3: fix glance.tests.functional.test\_scrubber
* Python3: fix logs/glance.tests.functional.test\_healthcheck\_middleware
* Python3: Fix glance.tests.functional.test\_glance\_replicator
* Python3: Fix glance.tests.functional.test\_bin\_glance\_cache\_manage
* Python 3: fix glance.tests.functional.db.test\_sqlalchemy
* Python3: fix test\_client\_redirects.py
* Add working functional tests to tox.ini
* Add alt text for badges
* Correct releasenote "Prepare for oslo.log 3.17.0"
* Prepare for oslo.log 3.17.0
* Show team and repo badges on README
* Handling HTTP range requests in Glance
* Remove unnecessary "in" from CONTRIBUTING.rst

14.0.0.0b1
----------

* Updated from global requirements
* IPv6 fix in Glance for malformed URLs
* Updated from global requirements
* Update api-ref with 409 response to image update
* Added overwrite warning for db\_export\_metadefs
* Allow specifying OS\_TEST\_PATH (to reduce tests ran)
* Do not use service catalog for cache client
* Added unit tests for disabled notifications in Notifier
* Updated from global requirements
* Updated from global requirements
* ping\_server: Always close the socket
* Remove mox3 in test-requirement.txt
* Correct url in doc source
* Updated from global requirements
* Add DeprecationWarning in test environments
* Updated from global requirements
* Update .coveragerc after the removal of openstack directory
* Updated from global requirements
* Drop unused import cfg
* Imported Translations from Zanata
* Image signature documentation modify barbican auth\_endpoint
* Add libvirt image metadef for hw\_pointer\_model
* Drop MANIFEST.in - it's not needed by pbr
* Add more resource url in readme.rst
* Updated from global requirements
* Cleanup newton release Notes
* Imported Translations from Zanata
* Fix Domain Model code example
* Imported Translations from Zanata
* Remove redundant word
* Enable release notes translation
* Updated from global requirements
* Extracted HTTP response codes to constants in tests
* Extracted HTTP response codes to constants
* Updated from global requirements
* Fix typo: remove redundant 'the'
* dev-docs: mark v1 as deprecated
* Updated from global requirements
* Updated from global requirements
* Correct releasenote for Ib900bbc05cb9ccd90c6f56ccb4bf2006e30cdc80
* Updated from global requirements
* [api-ref] configure LogABug feature
* Update CONTRIBUTING.rst
* Adding constraints around qemu-img calls
* Correct the order of parameters in assertEqual()
* Fixing inconsistency in Glance store names
* change the example URLs in api-ref for Glance
* Updated from global requirements
* api-ref: deprecate images v1 api
* Remove unused oslo.service requirement
* Update api-ref to add newly supported 'vhdx' disk format option
* Fix incorrect call for \_gen\_uuid
* Update description of image\_destroy method
* Update reno for stable/newton

13.0.0.0rc1
-----------

* Complete and update Newton release notes
* api-ref: add versions history
* Correctly point to Task Statuses from Tasks doc
* Updated from global requirements
* Fix cursive named arguments
* TrivialFix: Remove unused variable
* Fix nits from commit that introduces cursive
* Dev-docs: command in code block for refresh config
* Bump up Glance API minor version to 2.4
* [api-ref] Remove temporary block
* Add note to docs on release notes prelude section
* Fixed indentation
* Fix a small markup typo
* Remove self.\_\_dict\_\_ for formatting strings
* Keep consistent order for regenerated configs

13.0.0.0b3
----------

* Regenerate config files for Newton
* Improving help text for common-config opts
* Improving help text for data access API option
* Improving help text for Glance common-config opts
* Remove DB downgrade
* Release note for glance config opts
* Improve help text of glance config opts
* Attempt to not set location on non active or queued image
* Improving help text for WSGI server conf opts
* Use cursive for signature verification
* Updated from global requirements
* Improving help text for metadefs config option
* Improve the help text for registry client opts
* Improving help text for send\_identity\_headers opt
* Remove unused requirements
* Fix using filter() to meet python2,3
* Remove "Services which consume this" section
* Deprecate \`show\_multiple\_locations\` option
* Image signature base64 don't wrap lines
* Deprecate the Images (Glance) v1 API
* Improving help text of v1/v2 API & Registry opts
* Improve help text of scrubber daemon option
* Improving help text for RPC opt
* Improving help text for image conversion\_format
* Updated from global requirements
* Updated from global requirements
* TrivialFix: Remove cfg import unused
* Improving help text for store\_type\_preference opt
* Improving help text for Notifier opts
* Removing deprecated variable aliases from oslo\_messaging
* Improve help text of scrubber opts
* Correct link to image properties
* Use upper constraints for all jobs in tox.ini
* Fix five typos on doc
* Improve help text of quota opts
* Improve help text of registry server opts
* Get ready for os-api-ref sphinx theme change
* Add registry\_client\_opts to glance-cache.conf.sample
* Updated from global requirements
* Add CPU thread pinning to metadata defs
* Stop stack tracing on 404s
* Don't use config option sqlite\_db
* Index to generate doc page for refreshing-configs
* Add guideline to refresh config files shipped with source
* Add example for diff between assert true and equal
* Updated from global requirements
* Remove references of s3 store driver
* Add test class to versions tests
* change the example URLs in dev-docs for Glance
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Fix use of etc. in metadefs docs
* Improving help text for location\_strategy opt
* Use more specific asserts in unit tests
* Add requirements guidelines to docs
* api-ref: correct versions response example
* Updated from global requirements
* Version negotiation api middleware to include v2.3
* Add release notes for newton-1
* Remove deprecated test utility
* Some migrations tests incorrectly ref S3 for Swift
* Remove extraneous ws in architecture docs
* Refresh some config files based on bug fixes
* Generate and include sample config as part of docs
* Wrap text in sample configuration files at 80
* Improving help text for property utils opts
* Updated from global requirements
* Improving help text for swift\_store\_utils opts
* cache\_manage: fix a print bug in exit main
* replicator: dump: Display more info
* replicator: livecopy: Display more info
* Updated from global requirements
* Add ova to container format doc to rally plugin
* Add 'vhdx' disk format
* Add 'ova' as a container\_format in dev-docs
* Update sqlalchemy-migrate url
* Improving help text for taskflow executor opts
* Minor tweak to release note documentation
* Replace OpenStack LLC with OpenStack Foundation
* api-ref: Replace image-update response example
* api-ref: Refresh images schemas
* Correcting description of image\_update API method
* Making Forbidden Exception action oriented
* Updated from global requirements
* Make docs copyright consistent
* Add LOG.warning to Disallowed minor changes
* WADL to RST migration (part 2 - images)
* Updated from global requirements
* Improving help text for context middleware opts

13.0.0.0b2
----------

* Add \_\_ne\_\_ built-in function
* Replace "LOG.warn(\_" with "LOG.(\_LW"
* Updated from global requirements
* Cleanup i18n marker functions to match Oslo usage
* Use oslo.context features
* glance-replicator: size: Handle no args better
* WADL to RST migration (part 2 - metadefs)
* Remove unused LOG to keep code clean
* Nitpick spell change
* Correct reraising of exception
* Perform a cleanup of configuring.rst
* Fix duplicated osprofile config for registry
* replicator: size: Display human-readable size
* Return 400 when name is more than 255 characters
* glance-replicator: compare: Show image name in msg
* Use MultiStrOpt instead of ListOpt for args
* Updated from global requirements
* Improving help text for public\_endpoint
* Add image signature verification metadefs
* Add signed images documentation
* Glance tasks lost configuration item conversion\_format
* Update to Glance Contributor's docs
* WADL to RST migration (part 2 - tasks)
* Updated from global requirements
* Updated from global requirements
* WADL to RST migration (part 1)
* Add documentation about generating release notes
* Change default policy to admin
* Fix bug Swift ACL which disappears on Glance v1 images
* Do not set header if checksum doesn't exist
* Updated from global requirements
* Fixes the use of dates when listing images
* Use oslo\_log and delay string interpolation while logging
* Add in missing log hints
* Use http-proxy-to-wsgi middleware from oslo.middleware
* Updated from global requirements
* Imported Translations from Zanata
* Add a soft delete functionality for tasks
* Update man pages to current version and dates
* Incorrect title for Outbound Peak
* Updated from global requirements

13.0.0.0b1
----------

* Remove redundant store config from registry sample
* Remove TODOs from deprecated "sign-the-hash"
* Updated from global requirements
* Fix import of profiler options
* Add check to limit maximum value of max\_rows
* Updated from global requirements
* Updated from global requirements
* Remove verbose option from glance tests
* Raise exception when import without properties
* Excluded the 'visibility' from protected artifact fields
* Use OSprofiler options consolidated in lib itself
* Remove unnecessary executable permissions
* Updated from global requirements
* Normalize the options to use single quotes
* Updated from global requirements
* Updated from global requirements
* Allow tests to run when http proxy is set
* Correct some misspelt words in glance
* Clarify language used in glanceapi future section
* Images APIs: The Future
* Remove old \`run\_tests\` script
* Updated from global requirements
* Remove unnecessary executable privilege of unit test file
* Updated from global requirements
* Functional test comparing wrong items
* Contribution doc change for spec-lite
* Updated from global requirements
* Improve help text of image cache opts
* Remove deprecated "sign-the-hash" approach
* Imported Translations from Zanata
* Updated from global requirements
* Return BadRequest for 4 byte unicode characters
* Log when task is not configured properly
* Corrected section underline
* Give helpful error in tests if strace is missing
* Adding detailed alt text to images for accessibility
* Changed the spelling of opsrofiler to osprofiler
* Fix doc build if git is absent
* Increase max wait time, avoid racy failure in gate
* Updated from global requirements
* Add store opts to scrubber and cache sample conf
* Add wsgi options to the sample options
* Removed one extra enter key
* use os-testr instead of testr
* Updated from global requirements
* Modified message of exception and log
* Given space in between two words
* Use messaging notifications transport instead of default
* Updated from global requirements
* Update the Administrator guide links with new ones
* Imported Translations from Zanata
* Use roles attribute from oslo context
* Updated from global requirements
* Fix doc-strings warnings and errors
* Add 'Documentation' section to 'Contributing' docs

12.0.0
------

* Imported Translations from Zanata
* Fix typos in Glance files
* Imported Translations from Zanata
* Fix db purge type validation
* Imported Translations from Zanata
* Copy the size of the tag set
* Changes behaviour when an image fails uploading
* Imported Translations from Zanata
* Handle SSL termination proxies for version list
* Imported Translations from Zanata
* Imported Translations from Zanata
* Imported Translations from Zanata
* Fixed typos in two comments
* Update reno for stable/mitaka
* Update .gitreview for stable/mitaka

12.0.0.0rc1
-----------

* Fix possible race conditions during status change
* fix docstring warnings and errors
* revert warnerrors before gate breaks
* Fix link to document
* Imported Translations from Zanata
* Update the configuration doc
* Catch exceptions.HasSnapshot() from delete image in rbd driver
* Imported Translations from Zanata
* register the config generator default hook with the right name
* Reject bodies for metadef commands
* Remove unused enable\_v3\_api config option
* glance-manage db purge failure for limit
* Imported Translations from Zanata
* Remove state transition from active to queued
* Imported Translations from Zanata
* Updated the wording in the database architecture docs
* Test tag against schema to check length
* Update the config files
* Imported Translations from Zanata
* Adds virtual\_size to notifications
* Update configuring of Cinder store
* Add debug testenv in tox
* Fix levels of Swift configuration documentation
* no module docs generated
* Deprecate use\_user\_token parameter
* Creating or updating an image member in a list causes 500
* Updated from global requirements
* Updating comment in tests/unit/test\_migrations.py
* Fix update all props when you delete image

12.0.0.0b3
----------

* Fix location update
* Moved CORS middleware configuration into oslo-config-generator
* Use assertGreater/Less/Equal instead of assertTrue(A \* B)
* New metadata definitions from CIM
* Add support for DSA signatures
* Fix message formatting in glance-manage purge
* Updated from global requirements
* Remove unused pngmath sphinx extension
* Do not use constraints for venv
* Fix BaseException.message deprecation warnings
* Remove py33 from tox envlist
* Resolve i18n and Sphinx issues in signature\_utils
* Add support for ECC signatures
* Return 204 rather than 403 when no image data
* Move bandit into pep8
* Updated from global requirements
* Support importing OVA/OVF package to Glance
* Always use constraints
* Updated from global requirements
* Include registry\_client\_\* options in glance-scrubber.conf
* Python 3: fix a few simple "str vs bytes" issues
* remove redundant "#!/usr/bin/env python" header
* Encourage usage of identity API v3
* Python 3: fix glance.tests.functional.db.simple
* Reuse encodeutils.to\_utf8()
* Fix OpenSSL DeprecationWarning on Python 3
* Added support for new v2 API image filters
* Add sign-the-data signature verification
* Stop gridfs driver support
* Updated from global requirements
* Set self and schema to readOnly
* Make sure the generated glance-api.conf.sample is always the same
* Add unit test for default number of workers
* Replace assertRaisesRegexp with assertRaisesRegex
* Reuse jsonutils.dump\_as\_bytes()
* Do not log sensitive data
* Cache documentation about differences in files
* Tolerate installation of pycryptodome
* grammar correction in basic architecture file
* Promote log message to exception level on artifact load failure
* Allow mutable argument to be passed to BinaryObject artifacts
* Include version number into glare factory path in paste
* Fix 500 status code when we add in "depend\_on" yourself
* Disallow PATCH request when working with blob
* Use keystoneclient functions to receive endpoint
* Drop python 2.6 support
* Move Glance Artifact Repository API to separate endpoint
* Imported Translations from Zanata
* Imported Translations from Zanata
* clean up auto-generated docs for configuration options
* Update the home page
* Updated from global requirements
* Misspelling in message
* v2 - "readOnly" key should be used in schemas
* Prevent user from removing last location of the image
* Fix \_wait\_on\_task\_execution()
* Updating message for conversion\_format cfg\_opt
* Fix setup.cfg
* Replace exit() by sys.exit()
* Change Metadefs OS::Nova::Instance to OS::Nova::Server
* Change exception format checks in artifact tests
* Imported Translations from Zanata
* Remove glance\_store specific unit tests
* Encode headers to launch glance v2 on mod\_wsgi
* Make the task's API admin only by default
* No need to have async executor fetching be a contextmanager
* Updated from global requirements
* Python 3: fix glance.tests.unit
* Add storage\_policy VMware driver option for flavors
* Remove unneeded glance unit test assert calls
* utils: remove PrettyTable custom class in favor of the eponymous library
* Hacking checks for not using dict iteration calls
* Add note in comment where upstream taskflow change is
* Fix for Image members not generating notifications
* Updated from global requirements
* Generate page of all config options in docs
* Use oslo.utils exception encoding util
* Add hacking check to ensure not use xrange()
* Updated from global requirements
* Fix help command in cache manage and replicator
* Add properties\_target to Instance SW metadefs
* Simplify taskflow engine loading
* Allow image-list if access to attrs is forbidden
* [docs] Add Domain model implementation sub-section
* Drop dict.iterkeys() for python3
* Fix re-adding deleted members to an image in v1
* Replace xrange() with six.moves.range()

12.0.0.0b2
----------

* Add metadefs for Cinder volume type configuration
* Python3: Replace dict.iteritems with dict.items
* Enhance description of instance-uuid option for image-create
* Make cache config options clearer
* Imported Translations from Zanata
* Update links for CLI Reference
* Python3: fix operations of dict keys()
* Implement trust support for api v2
* Imported Translations from Zanata
* Fix the wrong options in glance-api and glance-registry confs
* Do not use api-paste.ini osprofiler options
* Update the cache documentation
* Updated from global requirements
* Catch UnsupportedAlgorithm exceptions
* Add functionality to define requests without body
* Updated from global requirements
* Use six.moves.reduce instead of builtin reduce
* Fixing the deprecated library function
* Remove Indices and tables section
* Remove unused logging import
* Fix Glance doesn't catch UnicodeDecodeError exception
* Updated from global requirements
* assertIsNone(val) instead of assertEqual(None,val)
* Fix glance doesn't catch exception NotFound from glance\_store
* Deprecated tox -downloadcache option removed
* Wait all wsgi server completion for worker exit
* Fix model sync for SQLite
* Update the cache middleware flavor guideline
* Add sign-the-hash deprecation warning
* Add db purge command
* Replace oslo\_utils.timeutils
* Add missing CPU features to Glance Metadata Catalog
* Updated from global requirements
* Remove iso8601 dependency
* Assert problems in Glance raised by Bandit
* Import i18n functions directly
* Validate empty location value for v1 api
* Updated from global requirements
* Added CORS support to Glance
* Capitalize 'glance' in db.rst
* Stop using tearDown in v1/test\_api.py
* Fix return 200 status code when we operate with nonexistent property
* Fix default value with PostgreSQL
* Rename glance-store to glance\_store
* Run py34 env first when launching tests
* Move store config opt to glance\_store section
* Remove artifact entry point
* Remove version from setup.cfg
* Add the Docker container format
* Change the format of some inconsistent docstring

12.0.0.0b1
----------

* Updated from global requirements
* Automated bandit checks in glance
* Port \_validate\_time() to Python 3
* Updated from global requirements
* Support Unicode request\_id on Python 3
* Unicode fix in BaseClient.\_do\_request() on py3
* Fix incorrect task status with wrong parameter
* Document contribution guidelines
* Updated from global requirements
* Fix glance.tests.unit.v1.test\_registry\_client
* Fix sample Rally plugin
* force releasenotes warnings to be treated as errors
* V1: Fix bad dates returning 500
* Fix 500 error when filtering with specified invalid operator
* Fix 500 error when filtering by 'created\_at' and 'updated\_at'
* Update os.path.remove as it does not exist
* Change the default notification exchange to glance
* Add documentation for configuring disk\_formats
* V1: Stop id changes after the image creation
* Format log messages correctly
* [docs] Update description of Glance-Swift conf options
* Disallow user modifying ACTIVE\_IMMUTABLE of deactivated images
* [docs] Update Glance architecture image
* test: make enforce\_type=True in CONF.set\_override
* OpenStack typo
* Support new v2 API image filters
* Remove anyjson useless requirement
* Python3: fix glance.tests.unit.v2.test\_registry\_client
* Location add catch bad Uri
* [docs] delete duplicated image\_status\_transition.png
* Reactivating admin public image returns 500
* Python3: fix glance.tests.unit.test\_migrations
* Python3: fix test\_image\_data\_resource
* Remove todo to remove /versions
* Python3: fix test\_registry\_api
* Updated from global requirements
* Fix typos in configuring.rst
* Python3: fix glance.tests.unit.v2.test\_images\_resource
* add "unreleased" release notes page
* Python 3: Fix glance.tests.unit.v2.test\_tasks\_resource
* Python 3: fix test\_image\_members\_resource
* Remove default=None for config options
* Update style for signature\_utils class
* Add -constraints for CI jobs
* Add a deprecation warning to the DB downgrade
* Remove unused exceptions from glance
* Add tasks info to glance documentation
* Add reno for release notes management
* Add subunit.log to gitignore
* Updated from global requirements
* Fix content type for Forbidden exception
* Port v1.test\_registry\_api to Python 3
* Remove requests to example.com during unit testing
* Port signature\_utils to Python 3
* Imported Translations from Zanata
* Rename semantic-version dep to semantic\_version
* Port script utils to Python 3
* Use dict comprehension
* Typo fix
* Updated from global requirements
* Port test\_cache\_manage to Python 3
* Port test\_wsgi to Python 3
* Updated from global requirements
* Fix incorrect Glance image metadata description
* Rename glance-store dep to glance\_store
* Remove glance\_store from extra requirements
* Port async tests to Python 3
* Fixed registry invalid token exception handling
* Updated from global requirements
* Add more tests which pass on Python 3
* Show the file name when there is an error loading an image metadef file
* Remove the duplicate file path created by sphinx build
* [docs] Adds new image status - deactivated
* Cause forbidden when deactivating image (non-admin)
* Updated from global requirements
* Don't allow queries with 'IN' predicate with an empty sequence
* utils: use oslo\_utils.uuidutils
* utils: remove unused functions in glance.utils
* Bodies that are not dicts or lists return 400
* Pass CONF to logging setup
* Fix 500 error when filtering by invalid version string
* Fix error when downloading image status is not active

11.0.0
------

* Add 'deactivated' status to image schema
* Allow owner to be set on image create
* Decrease test failure if second changes during run
* config: remove default lockutils set
* Catch InvalidImageStatusTransition error
* Port rpc and wsgi to Python 3
* Refactoring exceptions
* Fix glance ignoring headers when creating an artifact
* Add ability to specify headers in PUT/PATCH request in functional tests
* Fix 500 error when we specify invalid headers when working with blob/bloblist
* fix a typo in show\_multiple\_locations help message
* Updated from global requirements
* Add testresources and testscenarios used by oslo.db fixture
* Add testresources and testscenarios used by oslo.db fixture
* Add 'deactivated' status to image schema
* Fix the bug of "Error spelling of a word"
* Imported Translations from Zanata
* Fix 409 response when updating an image by removing read-only property

11.0.0.0rc2
-----------

* Imported Translations from Zanata
* Updated from global requirements
* Port api test\_common to Python 3
* An explicit check for the presence of a property
* Cleanup chunks for deleted image if token expired
* Download forbidden when get\_image\_location is set
* Download forbidden when get\_image\_location is set
* tell pbr to tell sphinx to treat warnings as errors
* add placeholder to ensure \_static directory exists
* add the man pages to the toctree
* escape underline introducing a spurious link reference
* do not indent include directives
* add missing document to toctree
* fix restructuredtext formatting errors
* Catch NotAuthenticated exception in import task
* Cleanup chunks for deleted image if token expired
* Catch NotAuthenticated exception in import task
* Imported Translations from Zanata
* Return missing authtoken options
* Change string generation mechanism for info logging
* Add Large pages meta definition
* Return missing authtoken options
* Fix mutable defaults in tests
* Imported Translations from Zanata

11.0.0.0rc1
-----------

* Open Mitaka development
* Cleanup of Translations
* Remove redundant requirements.txt from tox
* Add swiftclient to test-requirements
* Updated from global requirements
* Update Glance example configs to reflect Liberty
* Imported Translations from Zanata
* Fix server start ping timeout for functional tests
* Prevent image status being directly modified via v1
* Fixed the output of list artifacts API calls
* Change ignore-errors to ignore\_errors
* Prevent extraneous log messages and stdout prints
* [Glance Developer Guide] Grammar edits
* utils: stop building useless closure
* Remove \`openstack' directory
* Imported Translations from Zanata
* Fixes the possibility of leaving orphaned data
* Add missing function '\_validate\_limit'
* Fix wrong parameters order in Task
* Remove WARN log message from version\_negotiation
* Fix order of arguments in assertEqual
* Scrub images in parallel
* Make task\_time\_to\_live work
* Incorrect permissions on database migration file
* Add \_member\_ to property-protections-roles.conf.sample
* Domain model section
* Add unit tests for signature\_utils class
* Scrubber to communicate with trustedauth registry
* Corrected hyperlink in metadefs documentation
* Remove pointless tests comparing opts against list
* Remove old traces of the oslo-incubator
* Updated from global requirements
* Use oslo utils to encode exception messages
* clean up requirements

11.0.0.0b3
----------

* Disable v3 API by default
* Glance metadef tables need unique constraints
* Add image signing verification
* Don't return 300 when requesting /versions
* Updated from global requirements
* Use min and max on IntOpt option types
* Fixed non-owner write-access to artifacts
* Remove WritableLogger from wsgi server
* Allow filtering artifacts by range
* Fixed version inequality artifact filtering
* Artifacts are now properly filtered by dict props
* Fixed an HTTP 500 on artifact blob upload
* Port rally scenario plugin to new Rally framework
* Use stevedore directive to document plugins
* Catch update to a non-existent artifact property
* Fix spelling mistake in test\_images.py
* Change URL to End User Guide
* Fix URLs to admin-guide-cloud
* reuse the deleted image-member before creating a new image-member
* Imported Translations from Transifex
* Add CPU Pinning in metadata definitions
* Fix image owner can't be changed issue in v2
* Port common.utils to Python 3
* Port store image to Python 3
* Port replicator to Python 3
* Port glance.db to Python 3
* Port image cache to Python 3
* Fix Python 3 issues in glance.tests.unit.common
* Don't use slashes for long lines - use parentheses instead
* Updated from global requirements
* Imported Translations from Transifex
* Don't import files with backed files
* Use oslo\_config PortOpt support
* Setting default max\_request\_id\_length to 64
* Add mechanism to limit Request ID size
* return request\_id in case of 500 error
* Remove no longer used parameter (FEATURE\_BLACKLIST)
* Fixed few typos
* Correct the indentation on a few functions
* Use dictionary literal for dictionary creation
* List creation could be rewritten as a list literal
* Remove duplicate name attribute
* Incorrect variable name is declared
* Fix Request ID has a double 'req-' at the start
* Fix few typos in glance
* Updated from global requirements
* Fix 501 error when accessing the server with a non-existent method
* Imported Translations from Transifex
* Fix existing migrations to create utf-8 tables for MySQL DB
* Remove Catalog Index Service
* Fix error message's format in image\_member
* Include metadefs files in all packages

11.0.0.0b2
----------

* Move to using futurist library for taskflow executors
* Updated from global requirements
* Glance to handle exceptions from glance\_store
* Keeping the configuration file with convention
* Fix Python 3 issues in glance.tests.unit
* Allow ramdisk\_id, kernel\_id to be null on schema
* Remove duplicate string
* Imported Translations from Transifex
* Update glance\_store requirement to 0.7.1
* Fix Rally job failure
* Make utf8 the default charset for mysql
* Use oslo\_utils.encodeutils.exception\_to\_unicode()
* Updated from global requirements
* Remove H302,H402,H904
* add annotation of param
* Adds a rados\_connect\_timeout description
* Fix the document bug in part of digest\_algorithm
* Purge dead file-backed scrubber queue code
* Correct reference to VC as vCenter
* Remove usage of assert\_called\_once in mocks
* Rationalize test asserts
* Add .eggs/\* to .gitignore
* Refactoring of image-members v2 API implementation
* Improve code readability in functional test for the WSGIServer
* Make 'id' a read only property for v2
* Healthcheck Middleware
* Updated from global requirements
* Functional of the HTTPclient was put in own method
* Fix wrong check when creating image without data
* Remove unneeded OS\_TEST\_DBAPI\_ADMIN\_CONNECTION
* glance metadef resource-type-associate fails in postgresql
* Change default digest\_algorithm value to sha256
* Update requirements
* Remove unused oslo incubator files
* Remove unnecessary mixin from artifact domain model
* Adds os\_admin\_user to common OS image prop metadef
* Validate size of 'min\_ram' and 'min\_disk'
* Remove unused imported marker functions
* Fix duplicate unique constraint in sqlite migration
* Fix broken URL to docs.openstack.org
* Remove unnecessary executable permission
* Fix the db\_sync problem in 039 for db2
* Imported Translations from Transifex
* Fix OSProfiler exception when it is enabled
* Add an API call to discover the list of available artifact types

11.0.0.0b1
----------

* Provide extra parameter for FakeDB
* Switch to oslo.service
* tests: don't hardcode strace usage
* Fix tox -e py34
* Imported Translations from Transifex
* Typo fix
* Drop use of 'oslo' namespace package
* Update version for Liberty

11.0.0a0
--------

* Add client\_socket\_timeout option
* Switch from MySQL-python to PyMySQL
* Fix grammar in installation documentation
* Use ConfigFixture to ensure config settings are reverted
* Change status code from 500 to 400 for image update request
* Added test for "delete image member for public image"
* Pass environment variables of proxy to tox
* Add info how to avoid issues with token expiration
* Fix Python 3 issues
* Cleanup TODO in glance/gateway.py for elasticsearch being unavailable
* Fix DbError when image params are out of range
* REST API layer for Artifact Repository
* Remove duplicate creation of use\_user\_token
* Correct bad documentation merge
* Sync with latest oslo-incubator
* Fix HTTP 500 on NotAuthenticated in registry (v2)
* Domain layer for Artifact Repository
* Refactoring registry tests for v2
* Return empty str for permissive, none, properties
* Fix typo in the code
* Fixed error message for negative values of min\_disk and min\_ram
* Changes in rally-jobs/README.rst
* Make create task non-blocking
* Mark task as failed in case of flow failure
* Add VMDK as a conversion format to convert flow
* Make properties roles check case-insensitive
* Imported Translations from Transifex
* Change generic NotFound to ImageNotFound exception
* Remove is\_public from domain layer
* Leverage dict comprehension in PEP-0274
* Fix Server.start() on Python 3.4
* Use six.moves to fix imports on Python 3
* Imported Translations from Transifex
* Bug: tox -egenconfig failure (no glance-search.conf)
* Replace types.NameType with name
* Fix test\_opts to not resolve requirements
* Fix logging task id when task fails
* Fix typo in documentation
* rpc: remove wrong default value in allowed exceptions
* rpc: clean JSON serializer, remove strtime() usage
* Set filesystem\_store\_datadir in tests
* Taskflow engine mode should be parallel in sample conf
* VMware: vmware\_ostype should be enum
* VMware: add VirtualVmxnet3 to hw\_vif\_model
* Fixed glance.tests.unit.test\_artifacts\_plugin\_loader unit-test
* Fix delayed activation without disk and containers formats
* Save image data after setting the data
* Make sure the converted image is imported
* Updated from global requirements
* Imported Translations from Transifex
* Register oslo.log's config options in tests
* Remove string formatting from policy logging
* Remove unneeded setup hook from setup.cfg
* Drop use of 'oslo' namespace package

2015.1.0
--------

* Metadef JSON files need to be updated
* Plugin types are not exposed to the client
* v1 API should be in SUPPORTED status
* Read tag name instead of ID
* v1 API should be in SUPPORTED status
* API calls to Registry now maintain Request IDs
* Updated from global requirements
* Remove ordereddict from requirements
* Release Import of Translations from Transifex
* Glance database architecture section
* update .gitreview for stable/kilo
* Plugin types are not exposed to the client
* Revert "Reduce DB calls when getting an image"
* Read tag name instead of ID
* Metadef JSON files need to be updated
* Fix wrong docstring by copy-paste
* Add logging when policies forbid an action
* Remove non-ascii characters in glance/doc/source/architecture.rst
* Fix typos in glance/doc/source/configuring.rst
* Correct text in error response

2015.1.0rc1
-----------

* Fixes glance-manage exporting meta definitions issue
* Catch UnknownScheme exception
* Refactor API function test class
* Move elasticsearch dep to test-requirements.txt
* Update openstack-common reference in openstack/common/README
* glance-manage output when ran without any arguments
* Reduce DB calls when getting an image
* Open Liberty development
* Zero downtime config reload (glance-control)
* Imported Translations from Transifex
* Glance cache to not prune newly cached images
* glance-manage db load\_metadefs does not load all resource\_type\_associations
* Fix intermittent unit test failures
* Fix intermittent test case failure due to dict order
* Imported Translations from Transifex
* A mixin for jsonpatch requests validation
* Artifact Plugins Loader
* Declarative definitions of Artifact Types
* Creating metadef object without any properties
* Zero downtime config reload (log handling)
* Database layer for Artifact Repository
* Catalog Index Service - Index Update
* Catalog Index Service
* Zero downtime config reload (socket handling)
* Typo in pylintrc file
* Fix metadef tags migrations
* Update documentation for glance-manage
* Fix common misspellings

2015.1.0b3
----------

* Replace assert statements with proper control-flow
* Remove use of contextlib.nested
* Use graduated oslo.policy
* oslo: migrate namespace-less import paths
* Fix typo in rpc controller
* Fixes typo in doc-string
* wsgi: clean JSON serializer
* Remove scrubber cleanup logic
* use is\_valid\_port from oslo.utils
* Add ability to deactivate an image
* Remove deprecated option db\_enforce\_mysql\_charset
* Raise exception if store location URL not found
* Fix missing translations for error and info
* Basic support for image conversion
* Extend images api v2 with new sorting syntax
* Add the ability to specify the sort dir for each key
* Move to graduated oslo.log module
* Provide a way to upgrade metadata definitions
* Pass a real image target to the policy enforcer
* Glance basic architecture section
* Fix typo in configuration file
* Updated from global requirements
* Add sync check for models\_metadef
* Notifications for metadefinition resources
* Update config and docs for multiple datastores support
* Avoid usability regression when generating config
* Glance Image Introspection
* Add capabilities to storage driver
* Updated from global requirements
* Zero downtime configuration reload
* Add operators to provide multivalue support
* Remove the eventlet executor
* SemVer utility to store object versions in DB
* Switch to latest oslo-incubator
* Use oslo\_config choices support
* Fix the wrong format in the example
* Remove en\_US translation
* Git ignore covhtml directory
* db\_export\_metadefs generates inappropriate json files
* Synchronising oslo-incubator service module
* Unify using six.moves.range rename everywhere
* Updated from global requirements
* Glance returns HTTP 500 for image download
* Remove boto from requirements.txt
* Unbreak python-swiftclient gate
* Eventlet green threads not released back to pool
* Imported Translations from Transifex
* Removes unnecessary assert
* Prevents swap files from being found by Git
* Add BadStoreConfiguration handling to glance-api
* Remove redundant parentheses in conditional statements
* Make sure the parameter has the consistent meaning
* Image data remains in backend for deleted image
* Remove is\_public from reserved attribute in v2
* unify some messages
* Typos fixed in the comments
* The metadef tags create api does not match blue-print
* Clarified doc of public\_endpoint config option
* Add detail description of image\_cache\_max\_size
* Updated from global requirements

2015.1.0b2
----------

* Add Support for TaskFlow Executor
* Include readonly flag in metadef API
* Fix for CooperativeReader to process read length
* Software Metadata Definitions
* Updated from global requirements
* Rewrite SSL tests
* Replace snet config with endpoint config
* Simplify context by using oslo.context
* Handle empty request body with chunked encoding
* Update vmware\_adaptertype metadef values
* Typos fixed in the comments
* Updated from global requirements
* Redundant \_\_init\_\_ def in api.authorization.MetadefTagProxy
* Make digest algorithm configurable
* Switch to mox3
* Remove argparse from requirement
* Remove optparse from glance-replicator
* Eliminate shell param from subprocesses in tests
* Remove test dependency on curl
* Cleanup chunks for deleted image that was 'saving'
* remove need for netaddr
* Fix copy-from when user\_storage\_quota is enabled
* remove extraneous --concurrency line in tox
* SQL scripts should not manage transactions
* Fixes line continuations
* Upgrade to hacking 0.10
* Removed python-cinderclient from requirements.txt
* Move from oslo.db to oslo\_db
* Move from oslo.config to oslo\_config
* Improve documentation for glance\_stores
* Fix reference to "stores" from deprecated name
* Move from oslo.utils to oslo\_utils
* Updated from global requirements
* Updated from global requirements
* Prevent file, swift+config and filesystem schemes
* Simplify usage of str.startswith
* Adding filesystem schema check in async task
* Fix spelling typo
* Fix rendering of readme document
* Imported Translations from Transifex
* Add swift\_store\_cacert to config files and docs
* Add latest swift options in glance-cache.conf
* Fix document issue of image recover status
* rename oslo.concurrency to oslo\_concurrency
* Provide a quick way to run flake8
* Fix 3 intermittently failing tests
* Removed obsolete db\_auto\_create configuration option
* Fix client side i18n support for v1 api
* Move default\_store option in glance-api.conf
* Removes http-requests to glance/example.com in glance test
* Remove \_i18n from openstack-common
* Adds the ability to sort images with multiple keys
* Add sort key validation in v2 api
* Fixes typo: glance exception additional dot
* Allow $OS\_AUTH\_URL environment variable to override config file value
* Bump API version to 2.3
* Replace '\_' with '\_LI', '\_LE', '\_LW', '\_LC'

2015.1.0b1
----------

* Removes unused modules: timeutils and importutils
* Generate glance-manage.conf
* Imported Translations from Transifex
* Adding Metadef Tag support
* Removed unnecessary dot(.) from log message
* Using oslo.concurrency lib
* Update config and docs for Multiple Containers
* To prevent client use v2 patch api to handle file and swift location
* Updated from global requirements
* Use testr directly from tox
* Remove reliance on import order of oslo.db mods
* Remove openstack.common.gettextutils module
* Fix typo in common module
* Fix and add a test case for IPv6
* Start server message changed
* Fix getaddrinfo if dnspython is installed
* Workflow documentation is now in infra-manual
* Allow None values to be returned from the API
* Expose nullable fields properties
* Allow some fields to be None
* Update glance.openstack.common.policy and cleanup
* A small refactoring of the domain
* Updated from global requirements
* Disable osprofiler by default
* Work toward Python 3.4 support and testing
* Correct GlanceStoreException to provide valid message - Glance
* Remove Python 2.6 classifier
* Add ModelSMigrationSync classes
* Alter models and add migration
* No 4 byte unicode allowed in image parameters
* Update rally-jobs files
* Move from using \_ builtin to using glance.i18n \_
* Change Glance to use i18n instead of gettextutils
* Raising glance logging levels
* Imported Translations from Transifex
* Do not use LazyPluggable
* metadef modules should only use - from wsme.rest import json
* Wrong order of assertEquals args(Glance)
* Removal of unnecessary sample file from repository
* Upgrade tests' mocks to match glance\_store
* Remove exception declarations from replicator.py
* Typo correction of the prefix value in compute-host-capabilities
* Replace custom lazy loading by stevedore
* vim ropeproject directories added to gitignore
* Initiate deletion of image files if the import was interrupted
* Raise an exception when quota config parameter is broken
* Fix context storage bug
* Ignore Eric IDE files and folders in git
* Make RequestContext use auth\_token (not auth\_tok)
* Swift Multi-tenant store: Pass context on upload
* Use unicode for error message
* change default value for s3\_store\_host
* remove url-path from the default value of s3\_store\_host
* Complete the change of adding public\_endpoint option
* Update the vmware\_disktype metadefs values
* Add config option to override url for versions
* Separate glance and eventlet wsgi logging
* Remove openstack.common.test
* Remove modules from openstack-common.conf
* Improve error log for expired image location url
* Handle some exceptions of image\_create v2 api
* Remove eventlet\_hub option
* Adds openSUSE in the installing documentation
* Glance scrubber should page thru images from registry
* Add logging to image\_members and image\_tags
* Update glance.openstack.common

2014.2
------

* Fix options and their groups - etc/glance-api.conf
* Fix options and their groups - etc/glance-api.conf
* Adjust authentication.rst doc to reference "identity\_uri"
* Can not delete images if db deadlock occurs
* Reduce extraneous test output
* Isolate test from environment variables
* Fix for adopt glance.store library in Glance
* Adjust authentication.rst doc to reference "identity\_uri"

2014.2.rc2
----------

* Use identity\_uri instead of older fragments
* Prevent setting swift+config locations
* Metadef schema column name is a reserved word in MySQL
* Remove stale chunks when failed to update image to registry
* GET property which name includes resource type prefix
* g-api raises 500 error while uploading image
* Fix for Adopt glance.store library in Glance
* Update Metadefs associated with ImagePropertiesFilter
* updated translations
* Use ID for namespace generated by DB
* Metadef Property and Object schema columns should use JSONEncodedDict
* Add missing metadefs for shutdown behavior
* Update driver metadata definitions to Juno
* Mark custom properties in image schema as non-base
* Specify the MetadefNamespace.namespace column is not nullable
* Make compute-trust.json compatible with TrustFilter
* Include Metadata Defs Concepts in Dev Docs
* Nova instance config drive Metadata Definition
* Add missing metadefs for Aggregate Filters
* Updated from global requirements

2014.2.rc1
----------

* Imported Translations from Transifex
* Add specific docs build option to tox
* Add documentation for a new storage file permissions option
* Updated from global requirements
* Remove db\_enforce\_mysql\_charset option for db\_sync of glance-manage
* Fix assertEqual arguments order
* Prevent setting swift+config locations
* Remove stale chunks when failed to update image to registry
* Use specific exceptions instead of the general MetadefRecordNotFound
* Metadef schema column name is a reserved word in MySQL
* Fix for Adopt glance.store library in Glance
* GET property which name includes resource type prefix
* Incorrect parameters passed
* g-api raises 500 error while uploading image
* Minor style tidy up in metadata code
* Metadef Property and Object schema columns should use JSONEncodedDict
* Updated from global requirements
* Use ID for namespace generated by DB
* Switch to oslo.serialization
* Switch to oslo.utils
* Imported Translations from Transifex
* Add missing metadefs for shutdown behavior
* hacking: upgrade to 0.9.x serie
* Fix bad header bug in glance-replicator
* Run tests with default concurrency 0
* Refactor test\_migrations module
* Include Metadata Defs Concepts in Dev Docs
* Open Kilo development
* Mark custom properties in image schema as non-base
* Fix missing space in user\_storage\_quota help message
* Fix glance V2 incorrectly implements JSON Patch'add'
* Make compute-trust.json compatible with TrustFilter
* replace dict.iteritems() with six.iteritems(dict)
* Enforce using six.text\_type() over unicode()
* Update driver metadata definitions to Juno
* Remove uses of unicode() builtin
* Fixes Error Calling GET on V1 Registry
* Enabling separated sample config file generation
* Update Metadefs associated with ImagePropertiesFilter
* Fixes logging in image\_import's main module
* Refactor metadef ORM classes to use to\_dict instead of as\_dict
* Stop using intersphinx
* Just call register\_opts in tests
* Replaces assertEqual with assertTrue and assertFalse
* Block sqlalchemy-migrate 0.9.2
* Specify the MetadefNamespace.namespace column is not nullable
* Add missing metadefs for Aggregate Filters
* Nova instance config drive Metadata Definition
* Improve OS::Compute::HostCapabilities description
* Sync glance docs with metadefs api changes
* Change open(file) to with block
* Fix CommonImageProperties missing ":"
* Fix VMware Namespace capitalization & description
* Imported Translations from Transifex
* Duplicated image id return 409 instead of 500 in API v2
* Glance API V2 can't recognize parameter 'id'
* API support for random access to images
* Adopt glance.store library in Glance
* Adds missing db registry api tests for Tasks
* warn against sorting requirements
* Introduces eventlet executor for Glance Tasks

2014.2.b3
---------

* Glance Metadata Definitions Catalog - API
* ignore .idea folder in glance
* Glance Metadata Definitions Catalog - Seed
* Glance Metadata Definitions Catalog - DB
* Restrict users from downloading protected image
* Syncing changes from oslo-incubator policy engine
* Use identity\_uri instead of older fragments
* Fix legacy tests using system policy.json file
* Improve Glance profiling
* Fix collection order issues and unit test failures
* Check on schemes not stores
* Replacement mox by mock
* Imported Translations from Transifex
* Log task ID when the task status changes
* Changes HTTP response code for unsupported methods
* Enforce image\_size\_cap on v2 upload
* Do not assume order of images
* Ensure constant order when setting all image tags
* Fix bad indentation in glance
* Use @mock.patch.object instead of mock.MagicMock
* Adding status field to image location -- scrubber queue switching
* Bump osprofiler requirement to 0.3.0
* Fix migration on older postgres
* Fix rally performance job in glance
* Integrate OSprofiler and Glance
* Fix image killed after deletion
* VMware store: Use the Content-Length if available
* Fix RBD store to use READ\_CHUNKSIZE
* Trivial fix typo: Unavilable to Unavailable
* Quota column name 'key' in downgrade script
* Do not log password in swift URLs in g-registry
* Updated from global requirements
* Use \`\_LW\` where appropriate in db/sqla/api
* Log upload failed exception trace rather than debug
* Decouple read chunk size from write chunk size
* Enable F821 check: undefined name 'name'

2014.2.b2
---------

* Security hardening: fix possible shell injection vulnerability
* Move to oslo.db
* Catch exception.InUseByStore at API layer
* Fixes the failure of updating or deleting image empty property
* Adding status field to image location -- scrubber changes
* Also run v2 functional tests with registry
* Refactoring Glance logging lowering levels
* Set defaults for amqp in glance-registry.conf
* Fix typo in swift store message
* Add a \`\_retry\_on\_deadlock\` decorator
* Use auth\_token from keystonemiddleware
* Allow some property operations when quota exceeded
* Raising 400 Bad Request when using "changes-since" filter on v2
* Moving eventlet.hubs.use\_hub call up
* Adding status field to image location -- domain and APIs changes
* Add task functions to v2 registry
* Changing replicator to use openstack.common.log
* Fix unsaved exception in v1 API controller
* Pass Message object to webob exception
* Some exceptions raise UnicodeError
* Handle session timeout in the VMware store
* Some v2 exceptions raise unicodeError
* Resolving the performance issue for image listing of v2 API on server
* Switch over oslo.i18n
* Fix typo in comment
* Updated from global requirements
* Imported Translations from Transifex
* Updated from global requirements
* Raise NotImplementedError instead of NotImplemented
* Fix unsaved exception in store.rbd.Store.add()
* Fix docstrings in enforce() and check() policy methods
* Added an extra parameter to the df command
* Add CONTRIBUTING.rst
* Imported Translations from Transifex
* Use (# of CPUs) glance workers by default
* Sync processutils and lockutils from oslo with deps
* Document registry 'workers' option
* Removing translation from debug messages
* Unifies how BadStoreUri gets raised and logged
* Fix lazy translation UnicodeErrors
* Changing Sheepdog driver to use correct configuration function
* Implemented S3 multi-part upload functionality
* Log swift container creation
* Synced jsonutils and its dependencies from oslo-incubator
* Remove user and key from location in swift
* Updated from global requirements
* Changed psutil dep. to match global requirements
* Add pluging sample for glance gate
* Fixes v2 return status on unauthorized download
* Update documentation surrounding the api and registry servers
* Do not call configure several times at startup
* Move \`location\`'s domain code out of glance.store
* sync oslo incubator code
* notifier: remove notifier\_strategy compat support
* notifier: simply notifier\_strategy compat support
* colorizer: use staticmethod rather than classmethod
* Improved coverage for glance.api.\*
* Assign local variable in api.v2.image\_data

2014.2.b1
---------

* Use df(1) in a portable way
* Add test for no\_translate\_debug\_logs hacking check
* Add hacking checks
* replace dict.iteritems() with six.iteritems(dict)
* make uploading an image as public admin only by default
* remove default=None for config options
* Bump python-swiftclient version
* TaskTest:test\_fail() should use asserIstNone
* debug level logs should not be translated
* use /usr/bin/env python instead of /usr/bin/python
* Remove all mostly untranslated PO files
* Remove duplicated is\_uuid\_like() function
* fixed typos found by RETF rules in RST files
* Use safe way through "with" statement to work with files
* Clean up openstack-common.conf
* Removing duplicate entry from base\_conf
* Use safe way through "with" statement to work with files
* Use Chunked transfer encoding in the VMware store
* Ensures that task.message is of type unicode
* Replace unicode() for six.text\_type
* Prevent creation of http images with invalid URIs
* Fixed a handful of typos
* Fixes installation of test-requirements
* Add rally performance gate job for glance
* To fixes import error for run\_tests.sh
* Replace assert\* with more suitable asserts in unit tests
* Get rid of TaskDetails in favor of TaskStub
* Fixes "bad format" in replicator for valid hosts
* Sync latest network\_utils module from Oslo
* Fixes spelling error in test name
* Uses None instead of mutables for function param defaults
* Fix various Pep8 1.5.4 errors
* Fixes Glance Registry V2 client
* Update Glance configuration sample files for database options
* To prevent remote code injection on Sheepdog store
* Added undescore function to some log messages
* Adds TaskStub class
* Updated from global requirements
* user\_storage\_quota now accepts units with value
* Do not allow HEAD images/detail
* Configuration doc for VMware storage backend
* Catch loading failures if transport\_url is not set
* Fix Jenkins translation jobs
* Fixed the pydev error message

2014.1.rc1
----------

* Open Juno development
* Making DB sanity checking be optional for DB migration
* Fix deprecation warning in test\_multiprocessing
* Do not set Location header on HTTP/OK (200) responses
* Fix swift functional test "test\_create\_store"
* Sanitize set passed to jsonutils.dumps()
* When re-raising exceptions, use save\_and\_reraise
* Imported Translations from Transifex
* Sync common db code from Oslo
* Return 405 when attempting DELETE on /tasks
* Remove openstack.common.fixture
* Enable H304 check
* VMware store.add to return the image size uploaded
* registry: log errors on failure
* Removes use of timeutils.set\_time\_override
* Provide explicit image create value for test\_image\_paginate case
* Make the VMware datastore backend more robust
* Pass Message object to webob exception
* Detect MultiDict when generating json body
* Makes possible to enable Registry API v1 and v2
* Do not use \_\_builtin\_\_ in python3
* Updated from global requirements
* Fix swift functional test
* Provide an upgrade period for enabling stores
* API v2: Allow GET on unowned images with show\_image\_direct\_url
* Add copyright text to glance/openstack/common/\_\_init\_\_.py
* Don't enable all stores by default
* Remove unused methods
* Fix glance db migration failed on 031
* Document for API message localization

2014.1.b3
---------

* Add support for API message localization
* Add the OVA container format
* Store URI must start with the expected URI scheme
* Documentation for Glance tasks
* Remove import specific validation from tasks resource
* Remove dependency of test\_v1\_api on other tests
* Include Location header in POST /tasks response
* Catch exception when image cache pruning
* VMware storage backend should use oslo.vmware
* Sync common db code from Oslo
* Refactor UUID test
* Replaced calls of get(foo, None) -> get(foo)
* Use six.StringIO/BytesIO instead of StringIO.StringIO
* Replaced "...\'%s\'..." with "...'%s'..."
* Updated from global requirements
* Fix logging context to include user\_identity
* Log 'image\_id' with all BadStoreURI error messages
* Added undescore function to some strings
* Use 0-based indices for location entries
* Glance all: Replace basestring by six for python3 compatability
* Delete image metadata after image is deleted
* Modify assert statement when comparing with None
* Enable hacking H301 and disable H304, H302
* Replacement mox by mock
* Keep py3.X compatibility for urllib
* Use uuid instead of uuidutils
* Use six.moves.urllib.parse instead of urlparse
* Switch over to oslosphinx
* Fix parsing of AMQP configuration
* Add \`virtual\_size\` to Glance's API v2
* Add a virtual\_size attribute to the Image model
* Enable F841 check
* Add support for PartialTask list
* Rename Openstack to OpenStack
* Add a mailmap entry for myself
* Sync log.py from oslo
* Add unit tests around glance-manage
* Remove tox locale overrides
* Improve help strings
* Provide explicit image create value in Registry v2 API test
* VMware Datastore storage backend
* Adding status field to image location -- DB migration
* Apply image location selection strategy
* Switch to testrepository for running tests
* Clean up DatabaseMigrationError
* Enable H302 check
* Fix misspellings in glance
* Expose image property 'owner' in v2 API
* Removes logging of location uri
* Updated from global requirements
* Remove duplicate type defination of v2 images schema
* Enable H202 check
* Modify my mailmap
* glance-manage wont take version into consideration
* Move scrubber outside the store package
* Depending on python-swiftclient>=1.6
* Now psutil>=1.1.0 is actually on PyPI
* Fix indentation errors found by Pep8 1.4.6+
* Add VMware storage backend to location strategy
* Log a warning when a create fails due to quota
* glance requires pyOpenSSL>=0.11
* Imported Translations from Transifex
* Restore image status to 'queued' if upload failed
* Don't override transport\_url with old configs
* Provide explicit image create value in Registry v2 Client test
* Provide explicit task create and update value in controller tests
* Enable hacking H703 check
* Sync with global requirements
* Sync oslo.messaging version with global-requirements
* Don't rewrite the NotFound error message
* Update all the glance manpages
* Use common db migrations module from Oslo
* Check --store parameter validity before \_reserve
* Sync gettextutils from Oslo
* Enable gating on H501
* Add multifilesystem store to support NFS servers as backend
* Check first matching rule for protected properties
* Retry failed image download from Swift
* Restore image status on duplicate image upload

2014.1.b2
---------

* Tests added for glance/cmd/cache\_pruner.py
* Prevent E500 when delayed delete is enabled
* Sync unhandled exception logging change from Oslo
* Check image id format before executing operations
* fix bug:range() is not same in py3.x and py2.x
* Fix the incorrect log message when creating images
* Adding image location selection strategies
* Fix inconsistent doc string and code of db\_sync
* fixing typo in rst file
* Fix tmp DB path calculation for test\_migrations.py
* Change assertTrue(isinstance()) by optimal assert
* add log for \_get\_images method
* Makes 'expires\_at' not appear if not set on task
* Remove vim header
* Update the glance-api manpage
* Remove 'openstack/common/context.py'
* Allow users to customize max header size
* Decouple the config dependence on glance domain
* Fix typo in doc string
* Prevent min\_disk and min\_ram from being negative
* Set image size to None after removing all locations
* Update README to the valid Oslo-incubator doc
* Cleans up imports in models.py
* Sync Log levels from OSLO
* Align glance-api.conf rbd option defaults with config
* Bump hacking to 0.8 and get python 3.x compatibility
* Add config option to limit image locations
* replace type calls with isinstance
* Adding logs to tasks
* Skip unconfigurable drivers for store initialization
* Fix typo in gridfs store
* Oslo sync to recover from db2 server disconnects
* fix comments and docstrings misspelled words
* Fix call to store.safe\_delete\_from\_backend
* Switch to Hacking 0.8.x
* assertEquals is deprecated, use assertEqual (H234)
* Consider @,! in properties protection rule as a configuration error
* Remove unused imports in glance
* Remove return stmt of add,save and remove method
* Migrate json to glance.openstack.common.jsonutils
* Use common Oslo database session
* Define sheepdog\_port as an integer value
* Sync with oslo-incubator (git 6827012)
* Enable gating on F811 (duplicate function definition)
* Set image size after updating/adding locations
* Disallow negative image sizes
* Fix and enable gating on H306
* Make code base E125 and E126 compliant
* Fix 031 migration failed on DB2
* Remove the redundant code
* Correct URL in v1 test\_get\_images\_unauthorized
* Refactor tests.unit.utils:FakeDB.reset
* Fixed wrong string format in glance.api.v2.image\_data
* Empty files shouldn't contain copyright nor license
* Use uuid instead of uuidutils
* Enable H233/H301/H302 tests that are ignored at the moment
* Remove duplicate method implementations in ImageLocationsProxy
* Make Glance code base H102 compliant
* Make Glance code base H201 compliant
* Cleanup: remove unused code from store\_utils
* Filter out deleted images from storage usage
* Add db2 communication error code when check the db connection
* Refine output of glance service managment
* Adds guard against upload contention
* Fixes HTTP 500 when updating image with locations for V2
* Increase test coverage for glance.common.wsgi
* Return 204 when image data does not exist
* V2: disallow image format update for active status
* Enable tasks REST API for async worker
* Cleanly fail when location URI is malformed
* Rename duplicate test\_add\_copy\_from\_upload\_image\_unauthorized
* Adding missing copy\_from policy from policy.json
* Fix simple-db image filtering on extra properties
* Pin sphinx to <1.2
* assertEquals is deprecated, use assertEqual instead
* Fix and enable gating on H702
* Replace startswith by more precise store matching
* Remove unused exceptions
* Remove duplicate method \_\_getitem\_\_ in quota/\_\_init\_\_.py
* Enforce copy\_from policy during image-update
* Refactor StorageQuotaFull test cases in test\_quota
* remove hardcode of usage
* Added error logging for http store
* Forbidden update message diffs images/tasks/member
* Unittests added for glance/cmd/cache\_manage.py
* Makes tasks owner not nullable in models.py
* Move is\_image\_sharable to registry api
* Remove TestRegistryDB dependency on TestRegistryAPI
* Introduce Task Info Table

2014.1.b1
---------

* Migrate to oslo.messaging
* Add config option to limit image members
* Add config option to limit image tags
* Glance image-list failed when image number exceed DEFAULT\_PAGE\_SIZE
* DB migration changes to support DB2 as sqlalchemy backend
* Add documentation for some API parameters
* RBD add() now returns correct size if given zero
* Set upload\_image policy to control data upload
* Replace deprecated method assertEquals
* Clean up duplicate code in v2.image\_data.py
* Fix docstring on detail in glance/api/v1/images.py
* Use assertEqual instead of assertEquals in unit tests
* Remove unused package in requirement.txt
* Enable F40X checking
* Verify for duplicate location+metadata instances
* Adds domain level support for tasks
* Add eclipse project files to .gitignore
* Added unit tests for api/middleware/cache\_manage.py
* Fixed quotes in \_assert\_tables() method
* Use common db model class from Oslo
* Add upload policy for glance v2 api
* Adding an image status transition diagram for dev doc
* Add config option to limit image properties
* Explicit listing of Glance policies in json file
* Imported Translations from Transifex
* Sync openstack.common.local from oslo
* Clean up numeric expressions with oslo constants
* Don't use deprecated module commands
* Add tests for glance/notifier/notify\_kombu
* Fixes image delete and upload contention
* Log unhandled exceptions
* Add tests for glance/image\_cache/client.py
* Remove lxml requirement
* Sync common db and db.sqlalchemy code from Oslo
* Update glance/opensatck/common from oslo Part 3
* Tests added for glance/cmd/cache\_cleaner.py
* glance-manage should work like nova-manage
* Adds tasks to db api
* Sync lockutils from oslo
* sync log from oslo
* Add policy style '@'/'!' rules to prop protections
* Enable H501: do not use locals() for formatting
* Remove use of locals() when creating messages
* Remove "image\_cache\_invalid\_entry\_grace\_period" option
* Add unit test cases for get func of db member repo
* assertEquals is deprecated, use assertEqual
* Document default log location in config files
* Remove unused method setup\_logging
* Start using PyFlakes and Hacking
* Sync units module from olso
* Fixes error message encoding issue when using qpid
* Use mock in test\_policy
* Use packaged version of ordereddict
* Imported Translations from Transifex
* Glance v2: Include image/member id in 404 Response
* Replace qpid\_host with qpid\_hostname
* Fix Pep8 1.4.6 warnings
* Fixes content-type checking for image uploading in API v1 and v2
* Update my mailmap
* Addition of third example for Property Protections
* Sync iso8601 requirement and fixes test case failures
* Fixes wrong Qpid protocol configuration
* Use HTTP storage to test copy file functionality
* Remove redundant dependencies in test-requirements
* Documentation for using policies for protected properties
* checking length of argument list in "glance-cache-image" command
* optimize queries for image-list
* Using policies for protected properties
* Cleanup and make HACKING.rst DRYer
* Enable tasks data model and table for async worker
* Updated from global requirements
* Add call to get specific image member
* Put formatting operation outside localisation call
* Remove unused import
* The V2 Api should delete a non existent image
* Avoid printing URIs which can contain credentials
* Remove whitespace from cfg options
* Use Unix style LF instead of DOS style CRLF
* Adding 'download\_image' policy enforcement to image cache middleware
* Glance manage should parse glance-api.conf
* Fixes rbd \_delete\_image snapshot with missing image
* Correct documentation related to protected properties
* Update functional tests for swift changes
* Removed unsued import, HTTPError in v1/images.py
* Allow tests to run with both provenances of mox
* Glance GET /v2/images fails with 500 due to erroneous policy check
* Do not allow the same member to be added twice

2013.2.rc1
----------

* V2 RpcApi should register when db pool is enabled
* Imported Translations from Transifex
* Open Icehouse development
* Convert Windows to Unix style line endings
* Add documentation for property protections
* Adding checking to prevent conflict image size
* Fixes V2 member-create allows adding an empty tenantId as member
* Fixing glance-api hangs in the qpid notifier
* Change response code for successful delete image member to 204
* Cache cleaner wrongly deletes cache for non invalid images
* Require oslo.config 1.2.0 final
* Use built-in print() instead of print statement
* Swift store add should not use wildcard raise
* Corrected v2 image sharing documentation
* Add swift\_store\_ssl\_compression param
* Log a message when image object not found in swift
* Ensure prop protections are read/enforced in order
* Funtional Tests should call glance.db.get\_api
* Enclose command args in with\_venv.sh
* Fix typo in config string
* Adding encryption support for image multiple locations
* Fixes typos of v1 meta data in glanceapi.rst
* Respond with 410 after upload if image was deleted
* Fix misused assertTrue in unit tests
* Convert location meta data from pickle to string
* Disallow access/modify members of deleted image
* Fix typo in protected property message
* Remove the unused mapper of image member create
* Changed header from LLC to Foundation based on trademark policies
* Implement protected properties for API v1
* Add rbd store support for zero size image
* Remove start index 0 in range()
* Convert non-English exception message when a store loading error
* add missing index for 'owner' column on images table
* Publish recent api changes as v2.2
* Update schema descriptions to indicate readonly
* Enable protected properties in gateway
* Property Protection Layer
* Rule parser for property protections
* Scrubber refactoring
* Fix typo in IMAGE\_META\_HEADERS
* Fix localisation string usage
* Notify error not called on upload errors in V2
* Fixes files with wrong bitmode
* Remove unused local vars
* Clean up data when store receiving image occurs error
* Show traceback info if a functional test fails
* Add a storage quota
* Avoid redefinition of test
* Fix useless assertTrue
* emit warning while running flake8 without virtual env
* Fix up trivial License mismatches
* Introduced DB pooling for non blocking DB calls
* Use latest Oslo's version
* Improve the error msg of v2 image\_data.py
* Fix Sphinx warning
* Remove unused import
* test failure induced by reading system config file
* Prefetcher should perform data integrity check
* Make size/checksum immutable for active images
* Remove unused var DEFAULT\_MAX\_CACHE\_SIZE
* Implement image query by tag
* Remove unused import of oslo.config
* Code dedup in glance/tests/unit/v1/test\_registry\_api.py
* Add unit test for migration 012
* Call \_post\_downgrade\_### after downgrade migration is run
* Use \_pre\_upgrade\_### instead of \_prerun\_###
* Perform database migration snake walk test correctly
* redundant conditions in paginate-query
* Refactor glance/tests/unit/v2/test\_registry\_client.py
* Refactor glance/tests/unit/v1/test\_registry\_client.py
* Improve test/utils.py
* Make sure owner column doesn't get dropped during downgrade
* image-delete fires multiple queries to delete its child entries
* glance-replicator: enable logging exceptions into log file
* Make disk and container formats configurable
* Add space in etc/glance-cache.conf
* Removes duplicate options registration in registry clients
* remove flake8 option in run\_tests.sh
* Allow tests to run without installation
* Remove glance CLI man page
* Fix some logic in get\_caching\_iter
* Adding metadata checking to image location proxy layer
* Update .mailmap
* Migrate to PBR for setup and version code
* Interpolate strings after calling \_()
* BaseException.message is deprecated since Python 2.6
* Raise jsonschema requirement
* Text formatting changes
* Using unicode() convert non-English exception message
* ambiguous column 'checksum' error when querying image-list(v2)
* Handle None value properties in glance-replicator
* Fixes Opt types in glance/notifier/notify\_kombu.py
* Add unit test for migration 010
* Sync models with migrations
* Rename requirements files to standard names
* Include pipeline option for using identity headers
* Adding arguments pre-check for glance-replicator
* Add v1 API x-image-meta- header whitelist
* Stub out dependency on subprocess in unit tests
* Allow insecure=True to be set in swiftclient
* Verify if the RPC result is an instance of dict
* Adds help messages to mongodb\_store\_db and mongodb\_store\_uri
* Remove support for sqlalchemy-migrate < 0.7
* Don't rely on prog.Name for paste app
* Simulate image\_locations table in simple/api.py
* Turn off debug logging in sqlalchemy by default
* Glance api to pass identity headers to registry v1
* add doc/source/api in gitignore
* Use cross-platform 'ps' for test\_multiprocessing
* Fix stubs setup and exception message formatting
* Handle client disconnect during image upload
* improving error handling in chunked upload

2013.2.b2
---------

* Adding Cinder backend storage driver to Glance
* File system store can send metadata back with the location
* index checksum image property
* removed unused variable 'registry\_port'
* DB Driver for the Registry Service
* Unit tests for scrubber
* Remove references to clean arg from cache-manage
* Deleting image that is uploading leaves data
* Adding a policy layer for locations APIs
* Add/remove/replace locations from an image
* Adding multiple locations support to image downloading
* Make db properties functions consistent with the DB API
* Adds missing error msg for HTTPNotFound exception
* Allow storage drivers to add metadata to locations
* Fixes image-download error of v2
* On deleting an image, its image\_tags are not deleted
* Sync gettextutils from oslo
* Adding store location proxy to domain
* Notify does not occur on all image upload fails
* Add location specific information to image locations db
* Add custom RPC(Des|S)erializer to common/rpc.py
* use tenant:\* as swift r/w acl
* Add image id to the logging message for upload
* Fix cache delete-all-queued-images for xattr
* Fix stale process after unit tests complete
* Sync install\_venv\_common from oslo
* Fix list formatting in docs
* Fix doc formatting issue
* Ignore files created by Sphinx build
* Use oslo.sphinx and remove local copy of doc theme
* Refactor unsupported default store testing
* Add Sheepdog store
* Fix 'glance-cache-manage -h' default interpolation
* Fix 'glance-cache-manage list-cached' for xattr
* Dont raise NotFound in simple db image\_tag\_get\_all
* Use python module loading to run glance-manage
* Removed unusued variables to clean the code
* Fixes exposing trace during calling image create API
* Pin kombu and anyjson versions
* Do not raise NEW exceptions
* Port slow, overly assertive v1 functional tests to integration tests
* Add a bit of description
* Updated documentation to include notifications introduced in Grizzly
* Make eventlet hub choice configurable
* Don't run store tests without a store!
* Import sql\_connection option before using it
* Fix for unencrypted uris in scrubber queue files
* Fix incorrect assertion in test\_create\_pool
* Do not send traceback to clients by default
* Use Python 3.x compatible octal literals
* Remove explicit distribute depend
* Add missing Keystone settings to scrubber conf
* Sql query optimization for image detail
* Prevent '500' error when admin uses private marker
* Replace openstack-common with oslo in HACKING.rst
* Patch changes Fedora 16 to 18 on install page
* Pass configure\_via\_auth down to auth plugin
* Move sql\_connection option into sqlalchemy package
* Remove unused dictionary from test\_registry\_api.py
* Remove routes collection mappings
* updated content\_type in the exception where it is missing
* python3: Introduce py33 to tox.ini
* Don't make functional tests inherit from IsolatedUnitTest
* Add a policy layer for membership APIs
* Prevent E500 when listing with null values
* Encode headers and params
* Fix pydevd module import error
* Add documentation on reserving a Glance image
* Import strutils from oslo, and convert to it
* Sync oslo imports to the latest version

2013.2.b1
---------

* Fix undefined variable in cache
* Make passing user token to registry configurable
* Respond with 412 after upload if image was deleted
* Add unittests for image upload functionality in v1
* Remove glance-control from the test suite
* Prevent '500' error when using forbidden marker
* Improve unit tests for glance.common package
* Improve unit tests for glance.api.v1 module
* rbd: remove extra str() conversions and test with unicode
* rbd: return image size when asked
* Add qpid-python to test-requires
* tests: remove unused methods from test\_s3 and test\_swift
* Implement Registry's Client V2
* RBD store uses common utils for reading file chunks
* Redirects requests from /v# to /v#/ with correct Location header
* Add documentation for query parameters
* Small change to 'is\_public' documentation
* Fix test\_mismatched\_X test data deletion check
* Add GLANCE\_LOCALEDIR env variable
* Remove gettext.install() from glance/\_\_init\_\_.py
* Implement registry API v2
* Add RBD support with the location option
* Use flake8/hacking instead of pep8
* Use RBAC policy to determine if context is admin
* Create package for registry's client
* Compress response's content according to client's accepted encoding
* Call os.kill for each child instead of the process group
* Improve unit tests for glance.common.auth module
* Convert scripts to entry points
* Fix functional test 'test\_copy\_from\_swift'
* Remove unused configure\_db function
* Don't raise HTTPForbidden on a multitenant environment
* Expand HACKING with commit message guidelines
* Redirects requests from /v# to /v#/
* Functional tests use a clean cached db that is only created once
* Fixes for mis-use of various exceptions
* scrubber: dont print URI of image to be deleted
* Eliminate the race when selecting a port for tests
* Raise 404 while deleting a deleted image
* Fix test redifinitions
* Sync with oslo-incubator copy of setup.py and version.py
* Gracefully handle qpid errors
* Fix Qpid test cases
* Imported Translations from Transifex
* Fix the deletion of a pending\_delete image
* Imported Translations from Transifex
* Imported Translations from Transifex
* Fix functional test 'test\_scrubber\_with\_metadata\_enc'
* Make "private" functions that shouldn't be exported
* Call monkey\_patch before other modules are loaded
* Adding help text to the options that did not have it
* Improve unit tests for glance.api.middleware.cache module
* Add placeholder migrations to allow backports
* Add GridFS store
* glance-manage should not require glance-registry.conf
* Verify SSL certificates at boot time
* Invalid reference to self in functional test test\_scrubber.py
* Make is\_public an argument rather than a filter
* remove deprecated assert\_unicode sqlalchemy attribute
* Functional tests display the logs of the services they started
* Add 'set\_image\_location' policy option
* Add a policy handler to control copy-from functionality
* Fallback to inferring image\_members unique constraint name
* Standardize on newer except syntax
* Directly verifying that time and socket are monkey patched
* Reformat openstack-common.conf
* Fix domain database initialization
* Add tests for image visibility filter in db
* Add image\_size\_cap documentation
* Return 413 when image\_size\_cap exceeded
* Small change to exception handling in swift store
* Remove internal store references from migration 017
* Check if creds are present and not None

2013.1.rc1
----------

* Delete swift segments when image\_size\_cap exceeded
* bump version to 2013.2
* Don't print sql password in debug messages
* fixes use the fact that empty sequences are false
* Handle Swift 404 in scrubber
* Remove internal store references from migration 015
* Pin SQLAlchemy to 0.7.x
* Add unit tests for glance.api.cached\_images module
* Document the os options config for swift store
* Segmented images not deleted cleanly from swift
* Do not return location in headers
* Fix uniqueness constraint on image\_members table
* Declare index on ImageMember model
* Log when image\_size\_cap has been exceeded
* Publish API version 2.1
* Fix scrubber and other utils to use log.setup()
* Switch to final 1.1.0 oslo.config release
* Mark password options secret
* Fix circular import in glance/db/sqlalchemy
* Fix up publicize\_image unit test
* Fix rabbit\_max\_retry
* Fix visibility on db image\_member\_find
* Fix calls to image\_member\_find in tests
* Characterize image\_member\_find
* Retain migration 12 indexes for table image\_properties with sqlite
* Insure that migration 6 retains deleted image property index
* Fix check\_003 method
* Ensure disk\_ and container\_format during upload
* Honor metadata\_encryption\_key in glance domain
* Fix v2 data upload to swift
* Switch to oslo.config
* Update acls in the domain model
* Refactor leaky abstractions
* Remove unused variable 'image\_member\_factory'
* Generate notification for cached v2 download
* A test for concurrency when glance uses sleep
* Update documentation to reflect API v2 image sharing
* v1 api image-list does not return shared images
* Cannot change locations on immutable images
* Update db layer to expose multiple image locations
* Test date with UTC instead of local timezone
* Added better schemas for image members, revised tests
* Add pre and check phases to test migration 006
* Fix response code for successful image upload
* Remove unused imports
* Add pre and check phases to test migration 005
* Add pre and check phases to test migration 004
* Add PostgreSQL support to test migrations
* Enable support for MySQL with test migrations
* Set status to 'active' after image is uploaded
* Removed controversial common image property 'os\_libosinfo\_shortid'
* Parse JSON Schema Draft 10 in v2 Image update
* Redact location from notifications
* Fix broken JSON schemas in v2 tests
* Add migration 021 set\_engine\_mysql\_innodb
* Refactor data migration tests
* Fix migration 016 for sqlite
* Pin jsonschema version below 1.0.0
* Add check for image\_locations table
* Avoid using logging in signal handlers
* monkey\_patch the time module for eventlet
* Remove compat cfg wrapper
* Remove unnecessary logging from migration 019
* Fix migration 015 downgrade with sqlite
* Document db\_auto\_create in default config files
* Update openstack.common
* Extend the domain model to v2 image data
* Add migration 20 - drop images.location
* Add migration 19 - move image location data
* Filter images by status and add visibility shared
* Update oslo-config version
* Sync latest install\_venv\_common.py
* Adding new common image properties
* Use oslo-config-2013.1b3
* Add migration 18 - create the image\_locations table
* Create connection for each qpid notification
* Add migration to quote encrypted image location urls
* Updates OpenStack LLC with OpenStack Foundation
* Allowing member to set status of image membership
* Add an update option to run\_tests.sh
* Use install\_venv\_common.py from oslo
* Add status column to image\_members
* Adding image members in glance v2 api
* Fix issues with migration 012
* Add migration.py based on the one in nova
* Updated\_at not being passed to db in image create
* Fix moker typo in test\_notifier
* Clean dangling image fragments in filesystem store
* Sample config and doc for the show\_image\_direct\_url option
* Avoid dangling partial image on size/checksum mismatch
* Fix version issue during nosetests run
* Adding database layer for image members domain model
* Image Member Domain Model
* Additional image member information
* Adding finer notifications
* Add LazyPluggable utility from nova
* Update .coveragerc
* Removed unnecessary code
* Use more-specific value for X-Object-Manifest header
* Allow description fields to be translated in schema
* Mark password config options with secret
* Update HACKING.rst per recent changes
* Encrypt scrubber marker files
* Quote action strings before passing to registry
* Fixes 'not in' operator usage
* Add to multi-tenant swift store documentation
* Replace nose plugin with testtools details
* Convert some prints to addDetails calls
* Rearrange db tests in prep for testr
* Stop using detailed-errors plugin for nose
* Add \_FATAL\_EXCEPTION\_FORMAT\_ERRORS global
* Fix kwargs in xattr BadDriverConfiguration exc
* Prints list-cached dates in isoformat
* Fail sensibly if swiftclient absent in test
* Initialize CONF properly in store func tests
* Ensure swift\_store\_admin\_tenants ACLs are set
* Remove Swift location/password from messages
* Removed unnecessary code
* Removed unncessary code
* Pull in tarball version fix from oslo
* Updated image loop to not use an enumerator
* Log exception details
* Update version code from oslo
* Revert "Avoid testtools 0.9.25"
* Avoid testtools 0.9.25
* Update glance config files with log defaults
* Sync latest cfg and log from oslo-incubator
* Make v2 image tags test not load system policy
* Replace custom tearDown with fixtures and cleanup
* Update version code from oslo
* Use testtools for unittest base class
* Stub out find\_file... fix policy.json test issue
* Remove unused declaration in images.py
* Add import for filesystem\_store\_datadir config
* Update v1/images DELETE so it returns empty body
* Relax version constraint on Webob-1.0.8
* Set content-length despite webob
* Update common openstack code from oslo-incubator
* Modify the v2 image tags to use domain model
* Fix broken link in docs to controllingservers
* Adding a means for a glance worker to connect back to a pydevd debugger
* Use imported exception for update\_store\_acls
* Fix import order nits
* Verify size in addition to checksum of uploaded image
* Use one wsgi app, one dbengine worker
* Set Content-MD5 after calling webob.Response.\_app\_iter\_\_set
* Modify the v2 image controller to use domain model
* Log error on failure to load paste deploy app
* Configure endpoint\_type and service\_type for swift
* Refactor multi-tenant swift store
* Add registry\_client\_timeout parameter
* Use io.BufferedIOBase.read() instead of io.BytesIO.getvalue()
* Port to argparse based cfg
* wsgi.Middleware forward-compatibility with webob 1.2b1 or later
* Allow running testsuite as root user
* Allow newer boto library versions
* Fixed image not getting deleted from cache
* Updates keystone middleware classname in docs
* v2 API image upload set image status to active
* Use auth\_token middleware from python-keystoneclient
* Add domain proxies that stop unauthorized actions
* Add domain proxies that do policy.enforce checks
* Use 'notifications' as default notification queue name
* Unused variables removed
* Fixed deleted image being downloadable by admin
* Rewrite S3 functional tests
* Add store test coverage for the get\_size method
* Implement get\_size filesystem store method
* Add an image repo proxy that handles notifications
* Fixed Typo
* Return size as int from store get call
* Wrap log messages with \_()
* Add pep8 ignore options to run\_tests.sh
* Fix typo uudiutils -> uuidutils
* Make cooperative reader always support read()
* Add an image proxy to handle stored image data
* Allow for not running pep8
* Refactor where store drivers are initialized
* Audit error logging
* Stop logging all registry client exceptions
* Remove unused imports
* Add note about urlencoding the sql\_connection config opt
* Add an image repo to encapsulate db api access
* Add an image domain model and related helpers
* Fix simple db image\_get to look like sqlalchemy
* Return 403 on images you can see but can't modify
* Fixes is\_image\_visible to not use deleted key
* Ensure strings passed to librbd are not unicode
* Use generate\_uuid from openstack common
* Update uuidutils from openstack common
* Code cleanup: remove ImageAddResult class
* Lowering certain log lines from error to info
* Prevent infinite respawn of child processes
* Make run\_tests.sh run pep8 checks on bin
* Make tox.ini run pep8 checks on bin
* Pep8 fixes to bin/glance\* scripts
* Ensure authorization before deleting from store
* Port uuidutils to Glance
* Delete from store after registry delete
* Unit test remaining glance-replicator methods
* Use openstack common timeutils in simple db api
* Unit test replication\_dump
* pin sqlalchemy to the 0.7 series
* DRY up image fetch code in v2 API
* Return 403 when admin deletes a deleted image
* Pull in a versioning fix from openstack-common
* Fixes deletion of invalid image member
* Return HTTP 404 for deleted images in v2
* Update common to 18 October 2012
* implements selecting version in db sync
* add command "status" to "glance-control"
* Disallow admin updating deleted images in v2 api
* Clean up is\_public filtering in image\_get\_all
* SSL functional tests always omitted
* Fix scrubber not scrubbing with swift backend
* Add OpenStack trove classifier for PyPI
* Disallow updating deleted images
* Unit test replication\_size
* Add noseopts and replace noseargs where needed to run\_test.sh
* Setup the pep8 config to check bin/glance-control
* Change useexisting to extend\_existing to fix deprecation warnings
* Fix fragile respawn storm test
* Fix glance filesystem store race condition
* Add support for multiple db test classes
* Don't parse commandline in filesystem tests
* Improve test coverage for replicator's REST client
* Correct conversion of properties in headers
* Add test for v2 image visibility
* change the default sql connection timeout to 60s
* Add test for v1 image visibility
* FakeAuth not always admin
* Add GLANCE\_TEST\_TMP\_DIR environment var for tests
* Call setup\_s3 before checking for disabled state
* Add insecure option to registry https client
* Clean up pep8 E128 violations
* Rename non-public method in sqlalchemy db driver
* Add image\_member\_update to simple db api
* Multiprocess respawn functional test fix
* Remove unnecessary set\_acl calls
* Clean up pep8 E127 violations
* Remove notifications on error
* Change type of rabbit\_durable\_queues to boolean
* Pass empty args to test config parser
* Document api deployment configuration
* Clean up pep8 E125 violations
* Clean up pep8 E124 violations
* Ensure workers set to 0 for all functional tests
* image\_member\_\* db functions return dicts
* Alter image\_member\_[update|delete] to use member id
* Add test for db api method image\_member\_create
* Add test for image\_tag\_set\_all
* Add rabbit\_durable\_queues config option
* Remove extraneous db method image\_property\_update
* Update docs with modified workers default value
* Replace README with links to better docs
* Remove unused animation module
* Drop Glance Client
* Enable multi-processing by default
* Ensure glance-api application is "greened"
* Clean up pep8 E122, E123 violations
* Clean up pep8 E121 violations
* Fix scrubber start & not scrubbing when not daemon
* Clean up pep8 E502, E711 violations
* Expand cache middleware unit tests
* Change qpid\_heartbeat default
* Don't WARN if trying to add a scheme which exists
* Add unit tests for size\_checked\_iter
* Add functional tests for the HTTP store
* Generalize remote image functional test
* Add filesystem store driver to new func testing
* Add region configuration for swift
* Update openstack-common log and setup code
* Update v2.0 API version to CURRENT
* Set new version to open Grizzly development
* Add s3\_store\_bucket\_url\_format config option
* Ensure status of 'queued' image updated on delete
* Fallback to a temp pid file in glance-control
* Separate glance cache client from main client
* Rewrite Swift store functional tests
* Raise bad request early if image metadata is invalid
* Return actual unicode instead of escape sequences in v2
* Handle multi-process SIGHUP correctly
* Remove extraneous whitespace in config files
* Remove db auto-creation magic from glance-manage
* Makes deployed APIs configurable
* Asynchronously copy from external image source
* Sort UUID lists in test\_image\_get\_all\_owned
* Call do\_start correctly in glance-control reload
* Sync some misc changes from openstack-common
* Sync latest cfg changes from openstack-common
* Exception Handling for image upload in v2
* Fix cache not handling backend failures
* Instantiate wsgi app for each worker
* Require 'status' in simple db image_create
* Drop glance client + keystone config docs
* Use PATCH instead of PUT for v2 image modification
* Delete image from backend store on delete
* Document how to deploy cachemanage middleware
* Clean up comments in paste files
* WARN and use defaults when no policy file is found
* Encode headers in v1 API to utf-8
* Fix LP bug #1044462 cfg items need secret=True
* Always call stop_servers() after having started them in tests
* Adds registry logging
* Filter out deleted image properties in v2 api
* Limit simple db image_create to known image attrs
* Raise Duplicate on image_create with duplicate id
* Expand image_create db test
* Add test for nonexistent image in db layer
* Catch pruner exception when no images are cached
* Remove bad error message in glance-cache-manage
* Add missing columns to migration 14
* Adds notifications for images v2
* Move authtoken config out of paste
* Add kernel/ramdisk_id, instance_uuid to v2 schema
* Tweak doc page titles
* Drop architecture doc page
* Add link to notifications docs on index
* Remove repeated image-sharing docs
* Tidy up API docs
* Log level for BaseContextMiddleware should be warn
* Raise Forbidden exception in image_get
* Activation notification for glance v1 api
* Add glance/versioninfo to MANIFEST.in
* HTTPBadRequest in v2 on malformed JSON request body
* PEP8 fix in conf.py
* Typo fix in glance: existant => existent
* Rename glance api docs to something more concise
* Drop deprecated client docs
* Clean up policies docs page
* Remove autodoc and useless index docs
* Add nosehtmloutput as a test dependency
* Remove partial image data when filesystem is full
* Add 'bytes' to image size rejection message
* Add policy check for downloading image
* Convert limiting_iter to LimitingReader
* Add back necessary import
* Adds glance registry req id to glance api logging
* Make max image size upload configurable
* Correctly re-raise exception on bad v1 checksum
* Return httplib.HTTPResponse from fake reg conn
* Add DB Management docs
* Fix auth cred opts for glance-cache-manage
* Remove unused imports
* Set proper auth middleware option for anon. access
* multi_tenant: Fix 'context' is not defined error
* Validate uuid-ness in v2 image entity
* v2 Images API returns 201 on image data upload
* Fixes issue with non string header values in glance client
* Fix build_sphinx setup.py command
* Updates Image attribute updated_at
* Add policy enforcement for v2 api
* Raise 400 error on POST/PUTs missing request bodies
* Mark bin/glance as deprecated
* Return 201 on v2 image create
* Ignore duplicate tags in v2 API
* Expose 'protected' image attribute in v2 API
* Move to tag-based versioning
* Update restrictions on allowed v2 image properties
* Reveal v2 API as v2.0 in versions response
* Add min_ram and min_disk to v2 images schema
* Filter out None values from v2 API image entity
* Refactor v2 images resource unit tests
* Use container_format and disk_format as-is in v2
* Make swift_store_admin_tenants a ListOpt
* Update rbd store to allow copy-on-write clones
* Call stop_servers() in direct_url func tests
* Drop unfinished parts of v2 API
* Fix a couple i18n issues in glance/common/auth.py
* Sync with latest version of openstack.common.notifier
* Sync with latest version of openstack.common.log
* Sync with latest version of openstack.common.timeutils
* Sync with latest version of openstack.common.importutils
* Sync with latest version of openstack.common.cfg
* Allows exposing image location based on config
* Do not cache images that fail checksum verification
* Omit deleted properties on image-list by property
* Allow server-side validation of client ssl certs
* Handle images which exist but can't be seen
* Adds proper response checking to HTTP Store
* Use function registration for policy checks
* fix the qpid_heartbeat option so that it's effective
* Add links to image access schema
* ^c shouldn't leave incomplete images in cache
* uuid is a silly name for a var
* Support master and slave having different tokens
* Add a missing header strip opportunity
* URLs to glance need to be absolute
* Use with for file IO
* Add swift_store_admin_tenants option
* Update v1/v2 images APIs to set store ACLs
* Use event.listen() instead of deprecated listeners kwarg
* Store context in local thread store for logging
* Process umask shouldn't allow world-readable files
* Make TCP_KEEPIDLE configurable
* Reject rather than ignore forbidden updates
* Raise HTTPBadRequest when schema validation fails
* Expose 'status' on v2 image entities
* Simplify image and access_record responses
* Move optional dependencies from pip-requires to test-requires
* Fix dead link to image access collection schema
* Add in missing image collection schema link
* Drop static API v2 responses
* Include dates in detailed image output
* Update image caching middleware for v2 URIs
* Ensure Content-Type is JSON-like where necessary
* Have non-empty image properties in image.delete payload
* Add Content-MD5 header to V2 API image download
* Adds set_acls function for swift store
* Store swift images in separate containers
* Include chunk_name in swift debug message
* Set deleted_at field when image members and properties are deleted
* Use size_checked_iter in v2 API
* Honor '--insecure' commandline flag also for keystone authentication
* Make functional tests listen on 127.0.0.1
* Adds multi tenant support for swift backend
* Provide stores access to the request context
* Increase wait time for test_unsupported_default_store
* Match path_info in image cache middleware
* Don't show stack trace on command line for service error
* Replace example.com with localhost for some tests
* Fix registry error message and exception contents
* Move checked_iter from v1 API to glance.api.common
* Support zero-size image creation via the v1 API
* Prevent client from overriding important headers
* Updates run_tests.sh to exclude openstack-common
* Use openstack.common.log to log request id
* Update 'logging' imports to openstack-common
* Make get_endpoint a generic reusable function
* Adds service_catalog to the context
* Add openstack-common's local and notifier modules
* Making docs pretty!
* Removing 'Indices and tables' heading from docs
* Remove microseconds before time format conversion
* Add bin/glance-replicator to scripts in setup.py
* Initial implementation of glance replication
* Generate request id and return in header to client
* Reorganize context module
* Add openstack.common.log
* Ignore openstack-common in pep8 check
* Keystone dep is not actually needed
* Report size of image file in v2 API
* Expose owner on v2 image entities
* Add function tests for image members
* Allow admins to modify image members
* Allow admins to share images regardless of owner
* Improve eventlet concurrency when uploading/downloading
* Simplify v2 API functional tests
* Fix IndexError when adding/updating image members
* Report image checksum in v2 API
* Store properties dict as list in simple db driver
* Use PyPI for swiftclient
* Refactor pagination db functional tests
* Combine same-time tests with main db test case
* Add retry to server launch in respawn test
* Reorder imports by full import path
* Adds /v2/schemas/images
* Implement image filtering in v2
* Include all tests in generated tarballs
* Allow CONF.notifier_strategy to be a full path
* Add image access records schema for image resources
* Remove image members joinedload
* Clean up image member db api methods
* Retry test server launch on failure to listen
* Make image.upload notification send up2date metadata
* Added schema links logic to image resources
* Simplify sqlalchemy imports in driver
* Reduce 'global' usage in sqlalchemy db driver
* Standardize logger instantiation
* Add link descriptor objects to schemas
* Fix exception if glance fails to load schema
* Move the particulars of v2 schemas under v2
* Remove listing of image tags
* Set up Simple DB driver tests
* Trace glance service on launch failure
* Revert "Funnel debug logging through nose properly."
* Capture logs of failing services in assertion msg
* Remove some more glance-cache PasteDeploy remnants
* Fix typo of conf variable in config.py
* Remove unused imports in db migrations
* Increase timeout to avoid spurious test failures
* adds missing import and removes empty docstring
* Convert db testing to use inheritance
* Clean up .pyc files before running tests
* make roles case-insensitive
* Funnel debug logging through nose properly
* Fix typo of swift_client/swiftclient in store_utils
* Stop revealing sensitive store info
* Avoid thread creation prior to service launch
* Don't use PasteDeploy for scrubber and cache daemons
* Remove some unused glance-cache-queue-image code
* Implement pagination and sorting in v2
* Turn off SQL query logging at log level INFO
* Default db_auto_create to False
* Use zipballs instead of git urls
* Add metadata_encryption_key to glance-cache.conf
* Fix help messages for --debug
* Use python-swiftclient for swift store
* Fix to not use deprecated response.environ any more
* Import db driver through configuration
* Move RequestContext.is_image_* methods to db layer
* Begin replacement of sqlalchemy driver imports
* webob exception incorrectly used in v1 images.py
* Add tests and simplify GlanceExceptions
* Update default values for known_stores config
* Remove the conf passing PasteDeploy factories
* Port remaining code to global conf object
* Made changes to adhere to HACKING.rst specifications
* Use openstack-common's policy module
* Re-add migrate.cfg to tarball
* Implements cleaner fake_request
* Create 'simple' db driver
* Glance should use openstack.common.timeutils
* Clean up a few ugly bits from the testing patch
* Fix typo in doc
* Add cfg's new global CONF object
* fix side effects from seekability test on input file
* Just use pure nosetests
* Fix coverage jobs. Also, clean up the tox.ini
* Move glance.registry.db to glance.db
* Glance should use openstack.common.importutils
* Add read-only enforcement to v2 API
* Add a base class for tests
* Expose tags on image entities in v2 API
* Add additional info. to image.delete notification
* Expose timestamps on image entities in v2 API
* Sync with latest version of openstack.common.cfg
* Enable anonymous access through context middleware
* Add allow_additional_image_properties
* Fix integration of image properties in v2 API
* Lock pep8 at v1.1
* Lock pep8 to version 0.6.1 in tox.ini
* Fail gracefully if paste config file is missing
* Add missing files to tarball
* Remove unused imports in setup.py
* Adds sql_ config settings to glance-api.conf
* Correct format of schema-image.json
* Fix paste to correctly deploy v2 API
* Add connection timeout to glance client
* Leave behind sqlite DB for red functional tests
* Support DB auto-create suppression
* Fix glance-api process leak in respawn storm test
* Stubout httplib to avoid actual http calls
* Backslash continuation removal (Glance folsom-1)
* Implement image visibility in v2 API
* Add min_ram and min_disk to bin/glance help
* Implements blueprint import-dynamic-stores
* Add credential quoting to Swift's StoreLocation
* Combine v2 functional image tests
* Simplify JSON Schema validation in v2 API
* Expose deployer-specific properties in v2 API
* Test that v2 deserializers use custom schemas
* Load schema properties when v2 API starts
* Support custom properties in schemas for v2 API
* Fix tiny format string nit in log message
* Fixes bug 997565
* Allow chunked image upload in v2 API
* wsgi: do not respawn on missing eventlet hub
* Implement v2 API access resource
* Disallow image uploads in v2 API when data exists
* Implement v2 API image tags
* Use ConfigOpts.find_file() for policy and paste
* Implement image data upload/download for v2 API
* Use sdist cmdclass from openstack-common
* glance-api: separate exit status from message
* Update noauth caching pipeline to use unauth-ctx
* Return 204 from DELETE /v2/images/
* Add localization catalog and initial po files to Glance. Fix bug 706449
* Add /v2 to sample glance-api-paste.ini
* Basic functionality of v2 /images resource
* Split noauth context middleware into new class
* Add -c|--coverage option to run_tests.sh
* Convert glance to glance/openstack/common/setup.py
* Update glance to pass tenant_name properly
* Cleanup authtoken examples
* Support for directory source of config files
* Support conf from URL's with versions
* Auto generate AUTHORS file for glance
* Integrate openstack-common using update.py
* Fixes LP #992096 - Ensure version in URL
* Begin functional testing of v2 API
* Fixes LP #978119 - cachemanagement w/o keystone
* Omit Content-Length on chunked transfer
* Fix content type for qpid notifier
* Remove __init__.py from locale dir
* Fix i18n in glance.notifier.notify_kombu
* Override OS_AUTH_URL when running functional tests
* remove superfluous 'pass'
* fix bug lp:980892, update glance doc
* Add a space to fix minor typo in glance help
* Suppress pagination on non-tty glance index
* Kill glance-api child workers on SIGINT
* Ensure swift auth URL includes trailing slash
* add postgresql support to test_migrations
* 012_id_to_uuid: Also convert ramdisk + kernel ids
* API v2 controller/serialization separation
* search for logger in PATH
* Set install_requires in setup.py
* Minor grammar corrections
* Bootstrapping v2 Image API implementation
* Fix db migration 12
* Remove unused imports
* Reorganize pipelines for multiple api versions
* Skip test depending on sqlite3 if unavailable
* Defaulted amazon disk & container formats
* Compile BigInteger to INTEGER for sqlite
* Updated RST docs on containers, fewer references to OVF format
* rename the right index
* Reject excessively long image names
* Test coverage for update of image ownership
* Add MySQLPingListener() back
* Add support for auth version 2
* Run version_control after auto-creating the DB
* Allow specifying the current version in 'glance-manage version_control'
* Publish v2 in versions responses
* Allow yes-like values to be interpreted as bool
* Support owner parameter to glance add
* Adding versioned namespaces in test dir
* Typo
* Ensure functional db connection in configure_db()
* Set content_type for messages in Qpid notifier
* Avoid leaking secrets into config logging
* Fixes lp959670
* Send output of stty test cmd to stderr
* Use unique per-test S3 bucket name
* Specify location when creating s3 bucket
* Open Folsom
* Update 'bin/glance add' docstring *_format options
* Ensure all unauthorized responses return 403
* Avoid leaking s3 credentials into logs
* Avoid glance-logcapture displaying empty logs
* Add 'publicize_image' policy
* Fixed db conn recovery issue. Fixes bug 954971
* tox tests with run_tests.sh instead of nosetests
* Don't use auth url to determine service protocol
* Use tenant/user ids rather than names
* Update context middleware with supported headers
* Fixes LP #957401 - Remove stray output on stderr
* check connection in Listener. refer to Bug #943031
* Avoid tests leaking empty tmp dirs
* Remove keystone.middleware.glance_auth_token
* Updating version of Keystone
* Add policy checks for cache manage middleware
* nose plugin to capture glance service logs
* Add new UnexpectedStatus exception
* Do not error when service does not have 'type'
* Disambiguates HTTP 401 and HTTP 403 in Glance. Fixes bug 956513
* Add admin_role option
* Remove references to admin_token
* Remove glance-cache-queue-image
* Remove dependency on apiv1app from cachemanage
* Return 403 when policy engine denies action
* Add error checking to get_terminal_size
* Well-formed exception types for 413 & 503
* Ensure copy and original image IDs differ
* Include babel.cfg and glance.pot in tarballs
* Updating authentication docs
* General cleanup
* General docs cleanup
* Remove todolist from docs
* Add note about cache config options
* Change CLIAuth arg names
* Retry sendfile on EAGAIN or EBUSY
* Add module name to ClientException
* Update cli docs
* Remove 'community' doc page
* Removing registry spec from docs
* Fixes LP#934492 - Allow Null Name
* Refresh SSL cfg after parsing service catalog entry
* Fix typo in tox.ini
* Glance cache updates to support Keystone Essex
* updates man page for glance-scrubber. this time with extra pep8 scrubbing powers. Fixes bug 908803
* Update tox.ini for jenkins
* Replaced use of webob.Request.str_param
* Update paste file to use service tenant
* Update bin/glance to allow for specifying image id
* Fix deprecated warnings
* Remove trailing whitespaces in regular file
* add git commit date / sha1 to sphinx html docs
* Glance skip prompting if stdin isn't a tty
* Allow region selection when using V2 keystone
* Disallow file:// sources on location or copy-from
* Progress bar causes intermittent test failures
* Added first step of babel-based translations
* Complete fix for modification of unowned image
* Fix update of queued image with location set
* Support copy-from for queued images
* Add checksum to an external image during add
* Align to jenkins tox patterns
* Fix MANIFEST.in to include missing files
* Fix exception name
* Correct kernel/ramdisk example in docs
* Create sorting/pagination helper function
* Support new image copied from external storage
* blueprint progressbar-upload-image
* Avoid TestClient error on missing '__mro__' attr
* disk/container_format required on image activate
* Require container & disk formats on image create
* Support non-UTC timestamps in changes-since filter
* Return 503 if insufficient permission on filestore
* Adds README.rst to the tarball
* Ensure StorageFull only raised on space starvation
* Require auth URL if keystone strategy is enabled
* 003_add_disk_format.py: Avoid deadlock in upgrade
* Function uses 'msg' not 'message'
* Fix paging ties
* Ensure sane chunk size when pysendfile unavailable
* New -k/--insecure command line option
* Add a generic tox build environment
* Fix pep8 error
* Update Authors file
* Implement blueprint add-qpid-support
* Include glance/tests/etc
* Don't fail response if caching failed
* Force auth_strategy=keystone if --auth_url or OS_AUTH_URL is set
* Make Glance work with SQLAlchemy 0.7
* Use sendfile() for zero-copy of uploaded images
* Respawn glance services on unexpected death
* Blueprint cli-auth: common cli args
* Prep tox config for jenkins builds
* Get rid of DeprecationWarning during db migration
* Add --capture-output option to glance-control
* Add filter validation to glance API
* Fixes LP 922723
* Typofix is_publi -> is_public
* Add --await-child option to glance-control
* Fix Bug #919255
* Cap boto version at 2.1.1
* Simplify pep8 output to one line per violation
* Handle access restriction to public unowned image
* Check service catalogue type rather than name
* Restore inadvertently dropped lines
* Include the LICENSE file in the tarball
* Change xattr usage to be more broadly compatible
* Fix mixed usage of 's' and 'self'
* Don't force client to supply SSL cert/key
* Few small cleanups to align with Nova
* Adds documentation for policy files
* Client.add_image() accepts image data as iterable
* More flexible specification of auth credentials
* glance-api fails fast if default store unsupported
* Bug #909574: Glance does not sanity-check given image size on upload
* glance-control need not locate a server's config file (lp#919520)
* Bug#911599 - Location field wiped on update
* Return 400 if registry returns 400
* Set url's on AuthBadRequest exceptions
* Add policy checking for basic image operations
* Swallow exception on unsupported image deletion
* Ensure we only send a single content-type header
* Multi-process Glance API server support
* Set size metadata correctly for remote images
* Make paste.ini file location configurable
* Avoid the need for users to manually edit PasteDeploy config in order to switch pipelines
* Split out paste deployment config from the core glance *.conf files into corresponding *-paste.ini files
* Fixes LP Bug#913608 - tests should be isolated
* Set correct Content-Length on cached remote images
* Implement retries in notify_kombu
* Return correct href if bind_host is 0.0.0.0
* Remove assertDictEqual for python 2.6 compatibility
* Add optional revision field to version number
* LP Bug#912800 - Delete image remain in cache
* Add notifications for sending an image
* Bug #909533: Swift uploads through Glance using ridiculously small chunks
* Add Fedora clauses to the installing document
* Remove doc/Makefile
* Fixes incorrect URI scheme for s3 backend
* Add comments for swift options in glance-api.conf
* Split notification strategies out into modules
* fix bug 911681
* Fix help output for inverse of BoolOpt
* PEP8 glance cleanup
* Add more man pages
* Set execute permissions on glance-cache-queue-image
* Add a LICENSE file
* Add ability to specify syslog facility
* Install an actual good version of pip
* Bug #909538: Swift upload via Glance logs the password it's using
* Add tox.ini file
* Synchronize notification queue setup between nova and glance
* Fixes keystone auth test failures in python 2.6
* Removed bin/glance's TTY detection
* Fixes request with a deleted image as marker
* Adds support for protecting images from accidental deletion
* Fix for bug 901609, when using v2 auth should use /v2.0/tokens path
* Updated glance.registry.db for bug 904863
* Removing caching cruft from bin/glance
* Fixes LP Bug#901534 - Lost properties in upload
* Update glance caching middleware so it doesn't try to process calls to subresources. Fixes LP bug #889209
* Ensure functional tests clean up their images
* Remove extra swift delete_object call
* Add missing files to tarball
* Allow glance keystone unit tests to run with essex keystone
* Convert glance to use the new cfg module
* Add new cfg module
* Lock keystone to specific commit in pip-requires
* Add the missing column header to list-cached
* Rename 'options' variables to 'conf'
* Add generic PasteDeploy app and filter factories
* Secondary iteration of fix for bug 891738
* Rename .glance-venv to .venv
* Fix for bug 900258 -- add documentation for '--url' glance cli option
* Add --url option to glance cli
* Fixes LP Bug#850377
* Fixes LP Bug#861650 - Glance client deps
* Added some examples for "glance add"
* Bug#894027: use correct module when building docs
* Adds option to set custom data buffer dir
* Fix bug 891738
* Added missing depend on nosexcover
* Removed some cruft
* Fixes LP Bug#837817 - bin/glance cache disabled
* Separating add vs general store configuration
* Fixes LP Bug#885341 - Test failure in TestImageCacheManageXattr
* Making prefetcher call create_stores
* Fix handle get_from_backend returning a tuple
* Casting foreign_keys to a list in order to index into it
* Using Keystone's new port number 35357
* Adding admin_token to image-cache config
* Removing assertGreaterEqual
* Correcting image cleanup in cache drivers
* Adding tests to check 'glance show ' format
* Update 'glance show' to print a valid URI. Fixes bug #888370
* Gracefully handle image_cache_dir being undefined
* Remove unused versions pipeline from PasteDeploy config
* Allow glance-cache-* to find their config files
* Add some test cases for glance.common.config
* Fix name error in cache middleware
* Check to make sure the incomplete file exists before moving it during rollback. Fixes bug #888241
* Fix global name 'sleep' is not defined in wsgi.py. Fixes bug #888215
* Fixes LP Bug#878411 - No docs for image cache
* Fix typo in the cached images controller
* load gettext in __init__ to fix '_ is not defined'
* Adds option to encrypt 'location' metadata
* Fix LP Bug#885696 two issues with checked_iter
* Fix Keystone API skew issue with Glance client
* Fixed test failure in Python 2.6
* Glance redirect support for clients
* Fixes LP Bug#882185 - Document Swift HTTPS default
* Fixes LP Bug#884297 - Install docs should have git
* Add "import errno" to a couple of files
* Consolidate glance.utils into glance.common.utils
* Correcting exception handling in glance-manage
* More cache refactoring - Management Middleware
* Fixes LP Bug#882585 - Backend storage disconnect
* Convert image id value to a uuid
* Remove 'location' from POST/PUT image responses
* Removing glance-upload
* Adds Driver Layer to Image Cache
* Removed 'mox==0.5.0' and replaced with just 'mox' in tools/pip-requires
* Removing duplicate mox install in pip-requires
* Add .gitreview config file for gerrit
* Making TCP_KEEPIDLE socket option optional
* Overhauls the image cache to be truly optional
* Fixing functional tests that require keystone
* Fixes LP Bug#844618 - SQLAlchemy errors not logged
* Additions to .gitignore
* Better document using Glance with Keystone
* Fixes LP Bug#872276 - small typo in error message
* Adds SSL configuration params to the client
* Increases test coverage for the common utils
* Refactoring/cleanup around our exception handling
* Port Authors test to git
* Add RBD store backend
* Fixes LP Bug#860862 - Security creds still shown
* Extract image members into new Glance API controller
* Refactoring registry api controllers
* Returning functionality of s3 backend to stream remote images
* Make remote swift image streaming functional
* Improving swift store uri construction
* Fixes LP Bug #850685
* Do not allow min_ram or min_disk properties to be NULL and if they are None, make sure to default to 0. Fixes bug 857711
* Implementing changes-since param in api & registry
* Documenting nova_to_os_env.sh tool
* Added min_disk and min_ram properties to images. Fixes LP Bug#849368
* Fixing bug 794582 - Now able to stream http(s) images
* Fixes LP Bug#755916 - Location field shows creds
* Fixes LP Bug #804429
* Fixes Bug #851216
* Fixes LP Bug #833285
* Fixes bug 851016
* Fix keystone paste config for functional tests
* Updating image status docs
* Scrubber now uses registry client to communicate with registry; glance-api writes out to a scrubber "queue" dir on delete; scrubber determines images to delete from "queue" dir, not db
* Fixes LP Bug#845788
* Open Essex
* Remove PWD from possible config_file_dirs
* Update paste config files with keystone examples. see ticket: lp839559
* Adding Keystone support for Glance client
* Fix cached-images API endpoint
* Bug fix lp:726864
* Fixes Bug: lp825024
* Add functional tests
* Switch file based logging to WatchedFileHandler for logrotate
* Fixes LP Bug #827660 - Swift driver fail 5G upload
* Bug lp:829064
* Bug lp:829654
* Update rfc.sh to use 'true'
* Addresses glance/+spec/i18n
* Addresses glance/+spec/i18n
* Add rfc.sh for git review
* Add support for shared images
* Add notifications for uploads, updates and deletes
* Bug Fix lp:825493
* Bug fix lp:824706
* Adds syslog support
* Fixes image cache enabled config
* Improves logging by including traceback
* Addresses glance/+spec/i18n
* casting image_id to int in db api to prevent false matching in database lookups
* Addresses Bug lp:781410
* Removes faked out datastore entirely, allowing the DB API to be unit tested
* Consolidates the functional API test cases into /glance/tests/functional/test_api.py, adds a new Swift functional test case, verified that it works on Cloud Files with a test account
* breaking up MAX_ITEM_LIMIT and making the new values configurable
* Add @skip_if_disabled decorator to test.utils and integrate it into the base functional API test case. The S3 functional test case now uses test_api.TestApi as its base class and the setUp() method sets the disabled and disabled_message attributes that the @skip_if_disabled decorator uses
* Adds swift_enable_snet config
* Fixes bug lp:821296
* Detect python version in install_venv
* Implemented @utils.skip_test, @utils.skip_unless and @utils.skip_if functionality in glance/test/utils.py. Added glance/tests/unit/test_skip_examples.py which contains example skip case usages
* Changed setup.py to pull version info from git
* Removes the call to webob.Request.make_body_seekable() in the general images controller to prevent the image from being copied into memory. In the S3 controller, which needs a seekable file-like object when calling boto.s3.Key.set_contents_from_file(), we work around this by writing chunks of the request body to a tempfile on the API node, then stream this tempfile to S3
* Make sure we're passing the temporary file in a read-mode file descriptor to S3
* Removes the call to webob.Request.make_body_seekable() in the general images controller to prevent the image from being copied into memory. In the S3 controller, which needs a seekable file-like object when calling boto.s3.Key.set_contents_from_file(), we work around this by writing chunks of the request body to a tempfile on the API node, then stream this tempfile to S3
* - removed curl api functional tests - moved httplib2 api functional tests to tests/functional/test_api.py
* merging trunk
* Make tests a package under glance
* removing curl tests and moving httplib2 tests
* Move tests under the glance namespace
* Add filter support to bin/glance index and details calls
* merging trunk
* Update registry db api to properly handle pagination through sorted results
* Our code doesn't work with python-xattr 0.5.0, and that's the version installed in RH/Centos :( Andrey has updated the RPM config to specify 0.6.0, and this does the same to pip-requires
* Replaced occurrences of |str(e)| with |"%s" % e|
* First round of refactoring on stores
* Remove expected_size stuff
* Make calling delete on a store that doesn't support it raise an exception, clean up stubout of HTTP store and testing of http store
* adding sort_key/sort_dir to details
* merging lp:~rackspace-titan/glance/registry-marker-lp819551
* adding sort_key/sort_dir params
* adding --fixes
* adding complex test cases to recreate bug; updating db api to respect marker
* Add configuration check for Filesystem store on configure(), not every call to add()
* Refactor S3 store to make configuration one-time at init versus every method call invocation
* Refactor Swift store to make configuration one-time at init versus every method call invocation
* Forgot to add a new file..
* Refactors stores to be stateful:
* Make sure xattr>=0.6.0 in pip-requires
* updating documentation
* making limit option an integer
* updating broken tests
* adding limit/marker to bin/glance details call
* adding limit/marker params to bin/glance index
* merging trunk
* Use of "%default" in help string does not work, have to use "%(default)s". Per the 4th example http://docs.python.org/dev/library/argparse.html#prog
* Added nose-exclude to pip-requires
* Installed nose-exclude, ./run_tests.sh --unittests-only add '--exclude-dir=tests/functional' to NOSEARGS
* This one has been bugging me for a while, finally found out how to use the local default variable in the help string
* adding --fixes to commit
* Replaced occurrences of |str(e)| with |"%s" % e|
* Completes the S3 storage backend. The original code did not actually fit the API from boto it turned out, and the stubs that were in the unit test were hiding this fact
* Fix for boto1.9b issue 540 (http://code.google.com/p/boto/issues/detail?id=540)
* Remove unnecessary hashlib entry in pip-requires
* Add myself to Authors (again)
* hashlib exists all of the way back to python 2.5, there's no need to install an additional copy
* Adds image_cache_enabled config needed to enable/disable the image-cache in the glance-api
* Add more unit tests for URI parsing and get_backend_class() (which is going away in refactor-stores branch, but oh well..)
* Added unit tests for swift_auth_url @property. It was broken. startwith('swift+http') matches swift+https first
* Don't tee into the cache if that image is already being written
* Re-add else: raise
* Final fixes merging Rick's swift_auth_url @property with previous URI parsing fixes that were in the S3 bug branch..
* merge trunk
* This updates the pep8 version in pip-requires and updates run_tests.sh to provide a '-p' option that allows for just pep8 to be run
* Adding back image_cache_enabled config option for glance-api
* Don't tee same image into cache multiple times
* Fixes two things:
* adding run_tests.sh -p
* PEP8 whitespace fix
* Swift client library needs scheme
* Add tests for bad schemes passed to get_backend_class()
* Add tests for bad URI parsing and get_backend_class()
* Include missing bin/glance-scrubber in tarball
* Include bin/glance-scrubber in tarball binaries
* One more auth_tok-related change, to make it easier for nova to use the client without violating any abstraction boundaries
* Add fix for Bug #816386. Wait up to 5 min for the image to be deleted, but at least 15 seconds
* remove superfluous if statement
* Loop up to 5 min checking for when the scrubber deletes
* Typo in error condition for create_bucket_on_put, make body seekable in req object, and remove +glance from docs and configs
* Add functional test case for checking delete and get of non-existing image
* New local filesystem image cache with REST management API
* PEP8 Fixes
* Using DELETE instead of POST reap_invalid, reap_stalled
* Forgot to put back fix for the get_backend_class problem..
* Adding logging if unable to delete image cache file
* Add test case for S3 s3_store_host variations and fixes for URL bug
* Ensure image is active before trying to fetch it
* Boy, I'm an idiot...put this in the wrong branch directory..
* Handling ZeroDivision Error
* Using alternate logging syntax
* Missing import of common.config in S3 driver
* Tighten up file-mode handling for cache entry
* Adding request context handling
* Merging trunk
* Fixed review stuff from Brian
* Allow delaying the actual deletion of an image
* have the scrubber init a real context instead of a dict
* merge trunk
* Adds authentication middleware support in glance (integration to keystone will be performed as a piece of middleware extending this and committed to the keystone repository). Also implements private images. No limited-visibility shared image support is provided yet
* Take out extraneous comments; tune up doc string; rename image_visible() to is_image_visible(); log authorization failures
* use runs_sql instead of hackery
* Updating setup.py per bin/image_cache removal
* Removing bin/image_cache directory
* Removing cache enabled flag from most confs
* Removing imagecache from default WSGI pipeline
* Allow plugging in alternate context classes so the owner property and the image_visible() method can be overridden
* Make a context property 'owner' that returns the tenant; this makes it possible to change the concept of ownership by using a different context object
* Unit tests for the context's image_visible() routine
* We don't really need elevate()..
* Merging in adding_image_caching
* Importing module rather than function
* PEP 8 fixes
* Adding reap stalled images
* Returning number of files deleted by cache-clear
* Returning num_reaped from reap_invalid
* Moving bin to image_cache/
* Fixing comment
* Adding reaper script
* Adding percent done to incomplete and invalid image listing
* Renaming tmp_path to incomplete_path
* Renaming tmp_path to incomplete_path
* Renaming purge_all clear, less elegant variation
* Refactor to use lookup_command, so command map is used in one place
* Refactoring to use same command map between functions
* Renaming to cache-prefetching
* Renaming to cache-prefetch
* Renaming to cache-purge-all
* Renaming to cache-purge
* Renaming to cache-invalid
* Beginning to normalize names
* Refactoring out common code
* Refactoring prefetch
* Refactoring purge
* Refactoring purge_all
* Refactoring listing of prefetching images
* Using querystring params for invalid images
* Link incoming context with image owner for authorization decisions
* How in the world did I manage to forget this? *sigh*
* Make tests work again
* merge trunk
* pull-up from trunk
* This patch:
* PEP8 nit
* Added fix for Bug #813291: POST to /images setting x-image-meta-id to an already existing image id causes a 500 error
* One more try..
* Yet another attempt to fix URIs
* Add in security context information
* Moving cached image list to middleware
* Initial work on moving cached_images to WSGI middleware
* API is now returning a 409 error on duplicate POST. I also modified the testcase to expect a 409 response
* Add owner to database schema
* Fix URI parsing on MacOSX - Python 2.6.1 urlparse bugs
* Namespacing xattr keys
* PEP8 fixes
* Added 3 tests in tests/functional/test_httplib2_api.py to validate is_public filtering works
* left in 2 fixes.. removing redundant fix
* If meta-data contains an id field, pass it to _image_update()
* Adding functional test to show bug #813291
* fixed an inline comment
* removed pprint import, and added check for other 3 images to make sure is_public=True
* Added 3 tests to validate is_public filtering works
* Completed rewrite of tests/functional/test_curl_api.py using httplib2
* Changes the default filtering of images to only show is_public to actually use a default filter instead of hard coding. This allows us to override the default behavior by passing in a new filter
* removing pprint import
* completed rewrite of test_ordered_images().. this completes rewrite of test_curl_api using httplib2
* test_ordered_images() missing closing self.stop_servers()
* finished rewrite of test_filtered_images()
* add tests and make None filters work
* Change default is_public = True to just set a default filter instead of hard coding so it can be overridden
* make the tests work with new trunk
* merge trunk
* Refactoring PrettyTable so it doesn't print the lines itself
* Adding pruner and prefetcher to setup.py
* Removing extraneous text
* PEP 8 fixes
* Adding prefetching list to bin/glance
* More cleanups
* Adding prefetching of images
* Overhaul the way that the store URI works. We can now support specifying the authurls for Swift and S3 with either an http://, an https:// or no prefix at all
* Typo fix
* Removing test exception
* PEP 8 fixes
* Adding Error to invalid cache images
* Show invalid images from bin/glance
* Improving comments
* Cleaning up cache write
* Moving xattrs out to utils
* Clip and justify columns for display
* Including last accessed time in cached list
* Adding more comments
* Adding hit counter
* Pruning invalid cache entries after grace period
* Clear invalid images when purging all cached images
* Rollback by moving images to invalid_path
* Improving comments
* PEP8 fixes
* Adding cached image purge to bin/glance
* Adding purge all to bin/glance
* Adding catch_error decorator to bin/glance
* Adding 'cached' command to bin/glance
* Write incomplete files to tmp path
* Adding purge_all, skip if set if xattrs aren't supported
* Adding purge cache API call
* Adding API call to query for cache entries
* Create bin/glance-pruner
* Adding image_caching
* rewrote test_traceback_not_consumed(), working on test_filtered_images()
* Only change is reverting the patch that added migration to configure_db() and resets the in-memory SQLite database as the one used in functional testing. Yamahata's commits were unmodified..
* Reverts commit that did db migration during configure_db() and makes functional tests use in-memory database again. The issues we were seeing had to do with the timeout not being long enough when starting servers with disk-based registry databases and migrate taking too long when spinning up the registry server... this was shown in almost random failures of tests saying failure to start servers. Rather than increase the timeout from 3 seconds, I reverted the change that runs migrate on every startup and cut the total test duration down about 15 seconds
* merged glance trunk
* updated Authors
* Resolves bug lp:803260, by adding a check to ensure req.headers['Accept'] exists before it gets assigned to a variable
* run_tests.py: make test runner accept plugins
* run_tests.py: make run_tests.py work
* Fix the poor error handling uncovered through bug in nova
* Added stop_servers() to the end of the test cases
* adding testing & error handling for invalid markers
* removed pprint import
* removed extra space on test_queued_process_flow method definition
* removing commented out line
* merged in lp:~jshepher/glance/functional_tests_using_httplib2_part2
* applied requested fix in merge-prop
* Removing ordering numbers from the test cases, per jay pipes
* cleaning up the 'no accept headers' test cases. this should fail until Bug lp:803260 is resolved
* Cleaning up docstring spacing
* rewrite of test_size_greater_2G_mysql from test_curl_api.py using httplib2. All tests currently pass
* completed rewrite of test_003_version_variations. bug lp:803260 filed about step #0, and noted as a comment in code
* Fix for bug 803188. This branch also proposed for merging into trunk
* mis-numbering of steps
* fixing pep8 violation
* Added a check to ensure req.headers['Accept'] exists before it gets assigned to a variable. All unit/functional tests pass with this patch
* half way done with rewrite of test_003_version_variations.. step #0 causes a 500 error unless we supply an Accept header
* Prevent query params from being set to None instead of a dict
* removing rogue print
* fixing issue where filters are set to None
* Backport for bug 803055
* rewrote test_002_queued_process_flow from test_curl_api.py, all 6 steps pass against trunk revno:146
* Backport for bug 803055
* Prevent clients from adding query parameters set to None
* ignores None param values passed to do_request
* cleaning up docstrings
* merging trunk
* docstring
* Added sort_key and sort_dir query params to apis and clients
* fixing one last docstring
* docstrings!
* unit/test_config.py: make it independent of sys.argv
* run_tests.py: make test runner accept plugins
* reverting one import change; another docstring fix
* docstring
* Switch image_data to be a file-like object instead of bare string in image creating and updating. Without this Glance loads the whole image into memory, then copies it one time, then writes it to a temp file, and only after all this copies the image to the target repository
* Add myself to Authors file
* cleaning up None values being passed into images_get_all_public db call
* adding base client module
* restructuring client code
* merging trunk
* Explicitly set headers rather than add them
* fixing httplib2 functional test that was expecting wrong content-type value
* merging trunk
* rewrite of test_get_head_simple_post from tests/functional/test_curl_api.py using httplib2
* adding assert to check content_type in GET /images/ test
* Explicitly setting Content-Type, Content-Length, ETag, Location headers to prevent duplication
* Bug #801703: No logging is configured for unit tests
* Bug #801703: No logging is configured for unit tests
* Change image_data to body_file instead of body
* reset _MAKER every test and make sure to stop the servers
* Trunk merge, changed returned content-type header from 'application/octet-stream' to 'text/html; charset=UTF-8, application/octet-stream'
* yea python strings
* updated main docstring, as it was directly copied from test_curl_api.py
* merged trunk
* refactoring for Jay
* make image data a constant
* Fixes build failures due to webob upgrade. Updated pip-requires as well
* upgrading webob and fixing tests
* - refactoring wsgi code to divide deserialization, controller, serialization among different objects - Resource object acts as coordinator
* updating client docs
* fixing bad request error messages
* making SUPPORTED_* lists into tuples
* slight refactoring
* updating docs
* adding ordering support to glance api
* adding support to registry server and client for sort_key and sort_dir params
* re-ordered imports, using alpha-ordering
* removing unnecessary unittest import
* moved httplib2 tests to their own test case file, and uncommented md5 match
* updating docs; adding support for status filter
* adding query filters to bin/glance details
* adding query filters to bin/glance index
* forgot to remove pprint import
* adding hashlib as a dependency to pip-requires (not 100% sure it is not part of the base install though)
* fixed pep8 violation
* rewrote tests #7 - #11 for testcase (test_get_head_simple_post)
* refactoring for Brian
* refactoring from Rick's comments
* Added httplib2 dependency to tools/pip-requires
* rewriting functional tests to utilize httplib2 instead of curl
* make sure it runs as a daemon for the tests
* default to no daemon
* also allow for daemon in the config file so that we can test it easier
* default to non-daemon mode
* change order of parameters and make event optional
* initial refactoring from Jay's comments
* remove eventlet import and leftover function from previous refactoring
* remove file that got resurrected by accident
* fixed test case
* add functional tests of the scrubber and delayed_delete
* start the scrubber in addition to the api and registry
* add glance-scrubber to glance-control
* call it a Daemon, cuz it is
* Update Authors
* add the function to the stubs
* cleanup
* adding tests for wsgi module
* removing rogue print
* further refactoring
* adding refactored wsgi code from nova; moving registry api to new wsgi
* delayed scrubbing now works
* add the scrubber startup script
* remove unnecessary option
* add pending_delete to stub api
* pep8 fixed
* pep8 fixes
* pass in the type we want so it gets converted properly
* self leaked ;(
* only return the results that we need to act on
* allow passing of time to get only results earlier than the time
* server and scrubber work
* update the docstring to reflect current
* pass in a wakeup_time for the default time between database hits
* start making the server that will periodically scrub
* Config file for the scrubber. We make our own connection to the db here and bypass using the registry client so we don't have to expose non-public images over the http connection
* make the commits
* Add webob>=1.0.7 requirement to tools/pip-requires
* all delayed deletes will be going through a new service, if delayed_delete is False, then delete it right away, otherwise set it to pending_delete
* add scrub file
* set the image to pending delete prior to scheduling the delete
* refactor a bit so the db gets updated as needed and we only trigger the delay if the config option is set
* add scheduled_delete_from_backend which delays the deletion of images for at least 1 second
* don't delete directly but schedule deletion
* add the api function to get the images that are pending deletion
* add in delayed delete options
* Add workaround for Webob bug issue #12 and fix DELETE operation in S3 where URL parsing was broken
* Add ability to create missing s3 bucket on first post, similar to Swift driver
* Adding support for marker/limit query params from api, through registry client/api, and implementing at registry db api layer
* Bug #787296: test_walk_versions fails with SQLalchemy 0.7
* OK, fixes the issue where older versions of webob.Request did not have the body_file_seekable attribute. After investigation, turned out that webob.Request.make_body_seekable() method was available in all versions of webob, so we use that instead
* Added new disk_format type of 'iso'. Nova can use this information to identify images that have to be booted from a CDROM
* adding marker & limit params to glance client
* Auto-migrate if the tables don't exist yet
* Fix up unit tests for S3 after note from Chris. Also fix bug when S3 test was skipped, was returning error by accident
* Adds functional test that works with Amazon S3; fixes parsing of "S3 URLs" which urlparse utterly barfs on because Amazon stupidly allows forward slashes in their secret keys; update /etc/glance-api.conf for S3 settings
* merging trunk, resolving conflicts
* fixing sql query
* completing marker functionality
* Call stop_servers() for those 2 test cases missing it
* Correct documentation
* Add missing stop_servers() calls to two functional test cases
* Remove changes to stub database
* Auto-migrate if tables don't exist
* Fix accidental delete
* Remove additions to FIXTURES in test/stubs.py, which required changes elsewhere
* Sync with trunk
* Documentation for new results filtering in the API and client
* Fix tiny typo
* Documentation for new results filtering in the API and client
* Adding support for query filtering from the glance client library
* renaming query_params to params
* abstracting out filters query param serialization into BaseClient.do_request
* renaming tests to resolve conflict
* adding filters param to get_images and get_images_detailed in glance client
* Bug #787296: test_walk_versions fails with SQLalchemy 0.7
* Updated doc with 'iso' disk_format
* Update documentation
* Adding support for api query filtering - equality testing on select attributes: name, status, container_format, disk_format - relative comparison of size attribute with size_min, size_max - equality testing on user-defined properties (preface property name with "property-" in query)
* updating stubs with new sorting logic; updating tests
* fixing some copy/paste errors
* fixing some webob exceptions
* slight modification to registry db api to ensure marker works correctly
* slight refactoring per jaypipes' suggestions; sort on get images calls is now created_at desc
* Add tests for 'iso' image type. Remove hard coding of next available image id in tests. This prevents new test images from being added to the set generated by tests.unit.stubs.FakeDatastore
* pulling from parent branch
* docstring fix
* pushing marker/limit logic down into registry db api
* adding support for marker & limit query params
* removing some unnecessary imports
* making registry db api filters more structured; adding in a bit of sqlalchemy code to filter image properties more efficiently
* consolidating image_get_all_public and image_get_filtered in registry db api
* adding test case for multiple parameters from command line
* adding custom property api filtering
* adding size_min and size_max api query filters
* implemented api filtering on name, status, disk_format, and container_format
* Adds versioning to the Glance API
* Add test and fix for /v1.2/images not properly returning version choices
* Add more tests for version URIs and accept headers and fix up some of Brian's review comments
* Fix merge conflict..
* Changes versioned URIs to be /v1/ instead of /v1.0/
* Improve logging configuration docs..
* Doc and docstring fixes from Dan's review
* Removed some test config files that slipped in..
* Fix up find_config_file() to accept an app_name arg. Update all documentation referencing config files
* Fix pep8 complaint
* Add DISK_FORMAT for 'iso' type images
* Adds versioning to Glance's API
* Changes glance index to return all public images in any status other than 'killed'. This should allow tools like euca-describe-images to show images while they are in a saving/untarring/decrypting state
* Fix numbering in comment..
* Fixed doh. Updates test case to test for condition that should have failed with status!='active'
* Changes glance index to return all public images in any status other than 'killed'. This should allow tools like euca-describe-images to show images while they are in a saving/untarring/decrypting state
* Adding prefilled Authors, mailmap files. Adding test to validate Authors file is properly set up
* Documentation updates to make glance add command clearer, hopefully :)
* adding Authors functionality; fixing one rogue pep8 violation
* Improve logging configuration docs..
* Prevent users from uploading images with a bad or missing store. Allow deletion from registry when backend cannot be used
* bcwaldon review fixups
* adding comment
* Fix for bug #768969: glance index shows non-active images; glance show does not show status
* Completes the S3 storage backend. The original code did not actually fit the API from boto it turned out, and the stubs that were in the unit test were hiding this fact
* catching NotFound to prevent failure on bad location
* Prevent requests with invalid store in location param
* Allow registry deletion to succeed if store deletion fails
* Documentation updates to make glance add command clearer, hopefully :)
* Fix for LP Bug #768969
* Expanding user confirmation default behavior
* removing excessive exception handling
* pep8 fixes
* docstring and exception handling
* Expanding user_confirm default behavior
* I modified documentation to show more first-time user friendly examples on using glance. With the previous examples, I followed it as a first-time user and had to spend more than necessary time to figure out how to use it. With this modification, other first-time users would make it work on their systems more quickly
* - Require user confirmation for "bin/glance clear" and "bin/glance delete " - Allow for override with -f/--force command-line option
* adding --force option to test_add_clear
* Adds a test case for updating an image's Name attribute. glance update was not regarding 'name' as a top-level modifiable attribute..
* Name is an attribute that is modifiable in glance update, too.
* Mark image properties as deleted when deleting images. Added a unit test to verify public images and their properties get deleted when running a 'glance clear' command
* Update tests and .bzrignore to use tests.sqlite instead of glance.sqlite
* Only modify the connection URL in runs_sql if the original connection string starts with 'sqlite'
* Create a decorator that handles setting the SQL store to a disk-based SQLite database when arbitrary SQL statements need to be run against the registry database during a test case
* Docstring update on the run_sql_command function
* Mark image properties as deleted when deleting images. Added a unit test to verify public images and their properties get deleted when running a 'glance clear' command
* Add log_file to example glance.conf
* fixing spacing in help text
* adding confirmation on image delete/clear; adding user_confirm functionality
* Add log_file to example glance.conf
* Make sure we use get_option() when dealing with boolean values read from configuration files...otherwise "False" is True :(
* Fixing tests. Sorry for late response
* Make sure we use get_option() when dealing with boolean values read from configuration files...otherwise "False" is True :(
* resolve merge conflicts
* changed output
* Open Diablo release
* Diablo versioning
* Fake merge with ancient trunk. This is only so that people who "accidentally" have been following lp:~hudson-openstack/glance/trunk will not have problems updating to this 2011.2
------
* Final versioning for Cactus
* fixing after review
* Removes capture of exception from eventlet in _upload_and_activate(), which catches the exceptions that come from the _safe_kill() method properly
* RickH fixups from review
* Add catch-all except: block in _upload()
* change output from glance-registry
* get latest from lp:glance
* Ensures that configuration values for debug and verbose are used if command-line options are not set
* Removes capture of exception from eventlet in _upload_and_activate(), which catches the exceptions that come from the _safe_kill() method properly
* Fix logging in swift
* Fix Thierry's notice about switched debug and verbose
* Change parsing of headers to accept 'True', 'on', 1 for boolean truth values
* Final cactus versioning
* OK, fix docs to make it clear that only the string 'true' is allowed for boolean headers. Add False-hood unit tests as well
* Logging was not being setup with configuration file values for debug/verbose
* Fix up the way the exception is raised from _safe_kill()... When I "fixed" bug 729726, I mistakenly used the traceback as the message. doh
* Change parsing of headers to accept 'True', 'on', 1 for boolean truth values
* Add the migration sql scripts to MANIFEST.in. This gets them included in not only the tarball, but also by setup.py install
* Add the migration sql scripts to MANIFEST.in. This gets them included in not only the tarball, but also by setup.py install
* Changed raise of exception to avoid displaying incorrect error message in _safe_kill()
* fix logging in swift
* Changes "key" column in image_properties to "name"
* Updated properties should be marked as deleted=0. This allows previously deleted properties to be reactivated on an update
* Adds --config-file option to common options processing
* Update the docs in bin/glance so that help for the 'update' command states that metadata not specified will be deleted
* Fix config test fixtures and pep8 error in bin/glance-manage
* Provide revised schema and migration scripts for turning 'size' column in 'images' table to BIGINT. This overcomes a 2 gig limit on image sizes that can be downloaded from Glance
* Updated properties should be marked as deleted=0. Add unit tests
* Use logging module, not echo, for logging SQLAlchemy. Fixes bug 746435
* Change order of setting debug/verbose logging. Thanks for spotting this, Elgar
* Use logging module, not echo, for logging SQLAlchemy. Fixes bug 746435
* Ensure we don't ask the backend store to delete an image if the image is in a queued or saving state, since clearly the backend state has yet to completely store the image
* Changes "key" column in image_properties to "name"
* Use logging module, not echo for logging SQLAlchemy
* Updates glance-manage to use configuration files as well as command line options
* Ensure we don't ask a backend store to delete an image if the image is queued or saving
* Moved migration into Python script, otherwise PostgreSQL was not migrated. Added changes to the functional test base class to reset the data store between tests. GLANCE_SQL_CONNECTION env variable is now GLANCE_TEST_SQL_CONNECTION
* changed to more typical examples
* Add migration scripts for revising the datatype of the 'size' column in the images table
* Changes to database schema required to support images larger than 2Gig on MySQL. Does not update the migration scripts
* Updates to the Registry API such that only external requests to update image properties purge existing properties. The update_image call now contains an extra flag to purge_props which is set to True for external requests but False internally
* Updates to the Registry API such that only external requests to update image properties purge existing properties. The update_image call now contains an extra flag to purge_props which is set to True for external requests but False internally
* Update the glance registry so that it marks properties as deleted if they no longer exist when images are updated
* Simple one.. just add back the Changelog I removed by accident in r94. Fixes bug #742353
* Adds checksumming to Glance
* Uhhhm, stop_servers() should stop servers, not start them! Thanks to Cory for uncovering this copy/paste fail
* Fix up test case after merging in bug fixes from trunk... expected results were incorrect in curl test
expected results were incorrect in curl test * Add ChangeLog back to MANIFEST.in * Add migration testing and migration for disk\_format/container\_format * tests.unit.test\_misc.execute -> tests.utils.execute after merge * Allow someone to set the GLANCE\_TEST\_MIGRATIONS\_CONF environment variable to override the config file to run for the migrations unit test: * Update the glance registry so that it marks properties as deleted if they are no longer in the update list * Start eventlet WSGI server with a logger to avoid stdout output * Adds robust functional testing to Glance * Add migration script for checksum column * Fixed an oops. Didn't realize Repository.latest returned a 0-based version number, and forgot to reversed() the downgrade test * OK, migrations are finally under control and properly tested * Remove non-existing files from MANIFEST.in * Removed glance-combined. Fixed README * Removed glance-commit * Re-raise \_safe\_kill() exception in non-3-arg form to avoid pep8 deprecation error * Bug #737979: glance-control uses fixed path to Python interpreter, breaking virtualenv * Bug #737979: glance-control uses fixed path to Python interpreter, breaking virtualenv * Removes glance-combined and fixes TypeError from bad function calls in glance-manage * Start eventlet WSGI server with a logger to avoid stdout output * Pass boolean values to glance.client as strings, not integers * Small adjustment on wait\_for\_servers()... fixed infinite loop possibility * Adds robust functional testing to Glance * Ensure Content-type set to application/octet-stream for GET /images/ * Ensure Content-Length sent for GET /images/ * HTTPBackend.get() needed options in kwargs * Remove glance-combined (use glance-control all start). Fix glance-manage to call the setup\_logging() and add\_logging\_options() methods according to the way they are called in glance-api and glance-registry * Support account:user:key in Swift URIs. Adds unit tests for various calls to parse\_swift\_tokens() * Adds documentation on configuring logging and a unit test for checking simple log output * Support account:user:key in Swift URIs. Adds unit tests for various calls to parse\_swift\_tokens() * Cherry pick r86 from bug720816 * Cherry pick r87 from bug720816 * Fixed run\_tests.py addError() method since I noted it was faulty in another branch.. * Tiny pep8'ers * I stole the colorized code from nova * Fix typo * A quick patch to allow running the test suite on an alternate db backend * Merged trunk -resolved conflicts * [Add] colorization stolen from nova * Don't require swift module for unit-tests * Pep8 fix * Backing out unit-test workaround * Changed to have 2 slashes * Allow unit-tests to run without swift module * Remove spurious comment in test file * Add Glance CLI tool * Silly mistake when resolving merge conflict...fixed * Fixes passing of None values in metadata by turning them into strings. Also fixes the passing of the deleted column by converting it to and from a bool. The test for passing metadata was updated to include these values * Adds documentation on configuring logging and a test that log\_file works. It didn't, so this also includes fixes for setting up log handling :) * fix data passing * add failing test for None and deleted * Uses logger instead of logging in migration.py * Using logger in migration api instead of logging directly * Only clean up in the cleanup method.
Also, we don't need the separate URI now * Use unregister\_models instead of os.unlink to clean up after ourselves * Fixed unregister\_models to actually work * Fixed migration test to use a second DB URL * Replaced use of has\_key with get + default value * Make it clear that the checksum is an MD5 checksum in docs * Adds checksumming to Glance * Whoops! Left out a self.db\_path * Allow tests to run on an alternate dburi given via environment variables * Adds ability for Swift to be used as a full-fledged backend. Adds POST/PUT capabilities to the SwiftBackend Adds lots of unit tests for both FilesystemBackend and SwiftBackend Removes now-unused tests.unit.fakeswifthttp module * Remove last vestiges of account in Swift store * Quick fixup on registry.get\_client() * Public? => Public: per Cory's comment. Added a little more robust exception handling to some methods in bin/glance * Fixes for Devin and Rick's reviews * Adds disk\_format and container\_format to Image, and removes the type column * Fixes client update\_image to work like create\_image. Also fixes some messed up exceptions that were causing a try, except to reraise * Final review fixes. Makes disk\_format and container\_format optional. Makes glance-upload --type put the type in properties * remove test skip * Put account in glance.conf.sample's swift\_store\_auth\_address, use real swift.common.client.ClientException, ensure tests work with older installed versions of Swift (which do not have, for example, swift.common.client.Connection.get\_auth method) * Work around Eventlet exception clearing by memorizing exception context and re-raising using 3-arg form * Adds bin/glance to setup.py * Fixes from Rick's review #1 * Reverts Image \`type\` back to the old behavior of being nullable * Work around Eventlet exception clearing * Add sys.path mangling to glance-upload * Add sys.path adjustment magic to glance-upload * Adds ability for Swift to be used as a full-fledged backend. Adds POST/PUT capabilities to the SwiftBackend Adds lots of unit tests for both FilesystemBackend and SwiftBackend Removes now-unused tests.unit.fakeswifthttp module * Couple tiny cleanups noticed when reading merge diff. * bin/glance-admin => bin/glance, since it's really just the CLI tool to interact with Glance. Added lots of documentation and more logging statements in some critical areas (like the glance.registry calls). * Adds lots of unit tests for verifying exceptions are raised properly with invalid or mismatched disk and container formats * Makes --kernel and --ramdisk required arguments for glance-upload since Nova currently requires them * Removing image\_type required behavior * Removing requirement to pass kernel and ramdisk * Add test cases for missing and invalid disk and container formats * Requiring kernel and ramdisk args in glance-upload * Make disk\_format and container\_format required * Make disk\_format and container\_format required * Adds an admin tool to Glance (bin/glance-admin) that allows a user to administer the Glance server: * Make sure validate\_image() doesn't throw exception on missing status when updating image * Adds disk\_format and container\_format to Image, and removes the type column * This adds a test case for LP Bug 704854 -- Exception raised by Registry server gets eaten by API server * Add debugging output to assert in test\_misc. Trying to debug what Hudson fails on..
* Fixups from Rick's review * Removes now-unnecessary @validates decorator on model * I should probably rebase this commit considering all the previous commits weren't actually addressing the issue. The fact that I had glance-api and glance-registry installed on my local machine was causing the test runs to improperly return a passing result * Use Nova's path trick in all bins.. * Add path to glance-control * Removes image type validation in the Glance registry * Adding vhd as recognized image type * Reverting the removal of validation * Removing image type validation * Adds --pid-file option to bin/glance-control * Add %default for image type in glance-upload * Adds Location: header to return from API server for POST /images, per APP spec * Cleanups from Soren's review * Add an ImportError check when importing migrate.exceptions, as the location of that module changed in a recent version of the sqlalchemy-migrate library * Adds Location: header to return from API server for POST /images, per APP spec * This adds a test case for LP Bug 704854 -- Exception raised by Registry server gets eaten by API server * Adds --pid-file option to bin/glance-control * Add an ImportError check when importing migrate.exceptions, as the location of that module changed in a recent version of the sqlalchemy-migrate library * Adds sql\_idle\_timeout to reestablish connections to database after given period of time * Add sql\_idle\_timeout * Removes lockfile and custom python-daemon server initialization in favour of paste.deploy * Review 3 fixups * Remove get\_config\_file\_options() from glance-control * Fixes for Rick review #2 * Remove no-longer-needed imports.. * Remove extraneous debug import.. * Changes the server daemon programs to be configured only via paste.deploy configuration files. Removed ability to configure server options from CLI options when starting the servers with the exception of --verbose and --debug, which are useful during debugging * Adds glance-combined and glance-manage to setup.py * Fix merge conflicts * Adds glance-combined and glance-manage to setup.py * Fixes bug 714454 * ReStructure Text files need to end in .rst, not .py ;) * Update README, remove some vestigial directories, and other small tweaks * Removing dubious advice * Adds facilities for configuring Glance's servers via configuration files * Use fix\_path on find\_config\_file() too * Fixups from Rick's review * Including tests/ in pep8 * Typo fixes, clarifying * Updating README, rmdir some empty dirs * Adds bin/glance-control program server daemonization wrapper program based on Swift's swift-init script * Ignore build and deploy-related files * Adds sqlalchemy migrations * Fix bug 712575. 
Make BASE = models.BASE * Make sure BASE is the models.BASE, not a new declarative\_base() object * Had to reverse search order of directories for finding config files * Removes lockfile and custom python-daemon server initialization in favour of paste.deploy * Adds facilities for configuring Glance's servers via configuration files * Creating indexes * Adding migration test * Fixing migration import errors * Small cleanups * glance-manage uses common options * Merging in glance/cactus * Pep8 fix * Pep8 fixes * Refactoring into option groups 0.1.7 ----- * Hopefully-final versioning (0.1.7), no review needed * Final versioning, no review needed * Adding db\_sync to mirror nova * Adding some basic documentation * Better logging * Adding image\_properties migration * Adding migration for images table * Adding migration management commands * Remove debugging output that wasn't supposed to go into this branch (yet) :) * Adds --debug option for DEBUG-level logging. --verbose now only outputs INFO-level log records * Typo add\_option -> add\_options * Fixes from Rick's review. Thanks, Rick * Adds --sql-connection option * First round of logging functionality: * Merged use-optparse * Removes glance.common.db.sqlalchemy and moves registration of models and create\_engine into glance.registry.db.api * pep8-er in bin/glance-combined * Fixes lp710789 - use-optparse breaks daemonized process stop * Adds bin/glance-combined. Useful in testing.. * Tiny pep8 fixup in setup.py * Rework what comes back from parse\_options()[0] to not stringify option values. Keep them typed * Remove use of gflags entirely. Use optparse * Removing unnecessary param to get\_all\_public * Merging trunk * Adding back some missing code * Cleaning up some code * Makes Glance's versioning non-static. Uses Nova's versioning scheme * Adds/updates the copyright info on most of the files in glance and copies over the Authors check from Nova * Removing sqlalchemy dir * Removed methods from sqlalchemy/api * Refactor update/create * Messed up a permission somehow * Refactoring destroy * feh * A few more * A few more I missed * version bumped after tarball cut. no review needed.. * Bump version * Removing authors test for now * PEP8 cleanup * PEP8 cleanup * Should fix the sphinx issue * Adds architecture docs and enables Graphviz sphinx extension. Also cleans up source code formatting in docs * Make sphinx conditional * bumps version after tarball release of 0.1.4 * Bump version * Added bzr to pip-requires and refixed some pep8 stuff * Authors check * A few more copyrights * Copyright year change * Pylint cleanup * Added copyright info * Adds architecture docs and enables Graphviz sphinx extension. Also cleans up source code formatting in docs * bumps release version. ready for Bexar final release * Version bump after release * added sphinx and argparse into tools/pip-requires so that setup.py works. this bug also prevents nova from creating a virtualenv * fixes setup install pip dependencies * Version bump for release * Fixes bug #706636: Make sure pep8 failures will return failure for run\_tests.sh * Make run\_tests.sh return failure when pep8 returns fail, and fix the pep8 error in /bin/glance-upload * This patch: \* Converts dashes to underscores when extracting image-properties from HTTP headers (we already do this for 'regular' image attributes) \* Update image\_properties on image PUTs rather than trying to create dups * This patch replaces some remaining references to req.body (which buffers the entire request body into memory!)
with the util.has\_body method which can determine whether a body is present without reading any of it into memory * Adding Apache license, fixing long line * Making glance-upload a first-class binary * Remove useless test\_data.py file, add image uploader * Fix property create * Don't buffer entire image stream on PUT * Adds man pages for glance-registry and glance-api programs. Adds Getting Started guide to the Glance documentation * Fixes LP Bug #700162: Images greater than 2GB cannot be uploaded using glance.client.Client * Duh, it helps to import the class you are inheriting from... * OK, found a solution to our test or functional dilemma. w00t * Make compat with chunked transfer * Removes the last vestiges of Twisted from Glance * Pull in typo fix * Add in manpage installation hook. Thanks Soren :) * Fixes LP Bug #700162: Images greater than 2GB cannot be uploaded using glance.client.Client * Removes Twisted from tools/install\_venv.py and zope.interface from tools/pip-requires. Shaved a full 45 seconds for me off of run\_tests.sh -V -f now we're not downloading a giant Twisted tarball.. * Remove last little vestiges of twisted * Quick typo fix in docs * Add run\_tests.py to tarball * Also include run\_tests.py in tarball * Adds man pages for glance-registry and glance-api. Adds Getting Started guide to Glance docs * Fixes bug #696375: x-image-meta-size not optional despite documentation saying so * PEP8 fixes in /glance/store/\_\_init\_\_.py * Fix Bug #704038: Unable to start or connect to register server on anything other than 0.0.0.0:9191 * Fix Bug #704038: Unable to start or connect to register server on anything other than 0.0.0.0:9191 * upgrade version.. * Fixes Bug#696375: x-image-meta-size is not optional, contrary to documentation * Increase version after release * Cut 0.1.2 * Files missing from the tarball (and you probably need to cut a 0.1.2.) * Cleanup of RST documentation and addition of docs on an image's status * Include some files that were left out * Implements the S3 store to the level of the swift store * fixes bug698318 * Fixes suggested by JayPipes review. Did not modify docstrings in non-related files * This merge is in conjunction with lp:~rconradharris/nova/xs-snap-return-image-id-before-snapshot * Updating docs * Merging trunk * Clean up the rest of Glance's PEP8 problems * PEP-8 Fixes * Fixing eventlet-raise issue * Bug #698316: Glance reads the whole image into memory when handling a POST /images request * Merging trunk * Fixed pylint/pep8 for glance.store.s3 * Implement S3 to the level of swift * removing old methods * refactoring so update can take image\_data * More PEP8 fixes * Fix all Glance's pep8 problems * Remove incorrect doccomments about there being a default for the host parameter, fix misdocumented default port, and remove handling of missing parameters in BaseClient, because the values are always specified by the subclass's \_\_init\_\_ * Bug #696385: Glance is not pep8-clean * Bug #696382: Glance client parameter defaults misdocumented * Fixes a number of things that came up during initial coding of the admin tool: * Made review changes from Rick * Duh, use\_ssl should not use HTTPConnection.. * Remove final debugging statement * merge trunk * Remove debugging statements * Fixes a number of things that came up during initial coding of the admin tool: * fix bug 694382 * Bug #694382: setup.py refers to parallax-server and teller-server, when these have been renamed * documentation cleanup and matching to other OpenStack projects.
Glance is no longer the red-headed documentation stepchild in OpenStack.. * Converts timestamp attributes to datetime objects before persisting * Adding \_\_protected\_attributes\_\_, some PEP8 cleanups * review fixes * Update sphinx conf to match other OpenStack projects * Documentation cleanup. Splits out index.rst into multiple section docs * Converting to datetime before saving image * Enhances POST /images call to, you know, actually make it work.. * Make directory for filesystem backend * doing the merge of this again...somehow the trunk branch never got rev26 :( * Adds POST /images work that saves image data to a store backend * Update docs for adding image.. * Fix Chris minor nit on docstring * Fixes binaries, updates WSGI file to more recent version from Nova, and fixes an issue in SQLAlchemy API that was being hidden by stubs and only showed up when starting up the actual binaries and testing.. * Major refactoring.. * Fix testing/debug left in * Fixes from review * Documentation updates and GlanceClient -> Client * Refactor a bunch of stuff around the image files collection * Cleanup around x-image-meta and x-image-meta-property HTTP headers in GET/HEAD * Update /glance/client.py to have GlanceClient do all operations that RegistryClient does * Merges Glance API with the registry API: \* Makes HEAD /images/ return metadata in headers \* Make GET /images/ return image data with metadata in headers Updates docs some (more needed) * Second step in simplifying the Glance API * This is the first part of simplifying the Glance API and consolidating the Teller and Parallax APIs into a single, unified Glance API * Adds DELETE call to Teller API * Fixes Swift URL Parsing in Python 2.6.5 by adding back netloc * Moving imports into main which will only be executed after we daemonize thus avoiding the premature initialization of epoll * Delaying eventlet import until after daemonization * Fix Swift URL parsing for Python 2.6.5 * Don't leak implementation details in Swift backend. Return None on successful delete\_object call * Adds call to Swift's DELETE * Typo fixed and tiny cleanups * Adds DELETE to Teller's API * Just some small cleanups, fixing: \* Swapped port numbers (Parallax Port <=> Teller port) \* Removing extraneous routes in Teller API \* Adding required slashes to do\_request * \* Changes Teller API to use REST with opaque ID sent in API calls instead of a "parallax URI". This hides the URI stuff behind the API layer in communication between Parallax and Teller. \* Adds unit tests for the only complete Teller API call so far: GET images/, which returns a gzip'd string of image data * Fixing swapped port numbers, removing extraneous routes in Teller controller, adding required slash for do\_request calls * \* Changes Teller API to use REST with opaque ID sent in API calls instead of a "parallax URI". This hides the URI stuff behind the API layer in communication between Parallax and Teller. \* Adds unit tests for the only complete Teller API call so far: GET images/, which returns a gzip'd string of image data * Add files attribute to Parallax client tests * Adds client classes for Parallax and Teller and fixes some issues where our controller was not returning proper HTTP response codes on errors.. * Cleanup/fixes for Rick review * Adds client classes ParallaxClient and (stubbed) TellerClient to new glance.client module * packaging fixups preparing for release candidate * Remove symlinks in bin/ * Packaging fixups * awesomeness. 
merging into trunk since my parallax-api is already in trunk I believe. :) * Moving ATTR helpers into db module * PUTing and POSTing using image key * Quick fix...gives base Model an update() method to make it behave like a dict * Make returned mapping have an 'image' key to help in XML serialization * Ignore virtualenv directory in bzr * This patch removes unique index on the 'key' column of image\_metadatum and replaces it with a compound UniqueConstraint on 'image\_id' and 'key'. The 'key' column remains indexed * Fixes lp653358 * Renaming is\_cloudfiles\_available -> is\_swift\_available * Adds compound unique constraint to ImageMetadatum * Using swift.common.client rather than python-cloudfiles in Teller's Swift backend * Adds DELETE to the Parallax REST API * Implements the REST call for updating image metadata in the Parallax API * Implements Parallax API call to register a new image * Adds a /images/detail route to the Parallax controller, adds a unit test for it, and cleans up Michael's suggestions * Works around non-RFC compliance in Python (< 2.6.5) urlparse library * Workaround for bug in Python 2.6.1 urlparse library * Adds tests for bad status set on image * Implements Parallax API call to register a new image * This patch overhauls the testing in Glance: * unittest2 -> unittest. For now, since not using unittest2 features yet * Fixes up test\_teller\_api.py to use stubout correctly. Fixes a few bugs that showed up in the process, and remove the now-unnecessary FakeParallaxAdapter * First round of cleaning up the unittests. Adds test suite runner, support for virtualenv setup and library dependencies, resolves issues with ImportErrors on cloudfiles, adds pymox/stubout support and splits the backend testing into distinct unittest cases * With this patch Parallax and teller now work end-to-end with the Swift backend * Adding missing backend files, fixing typos in comments * This patch: \* Decouples Controller for ParallaxAdapter implementation by adding generic RegistryAdapter and providing a lookup function \* Adds base model attributes to Parallax's JSON (created\_at, etc) * Improving symmetry between teller and parallax * Fixing swift authurl * Add RegistryAdapter, include ModelBase attributes * Fixing Teller image tests * Created teller-server.py in bin/ * Cleaning up Teller backend * Rewrote ImageController to inherit from the work Rick Harris did in glance.common. Moved it into teller/api/images.py to make teller match parallax. Fixed tests. Renamed them to distinguish if any parallax tests ever get written * Adding Image index call, nesting the Image show dict to facilitate XML serialization * Moving parallax models out of common and into the parallax module * Updated tests * Reimplements server.py as a wsgi api inheriting from glance.common * This patch: \* pulls in a number of useful libraries from Nova under the common/ path (we can factor those out to a shared library in Bexar-release) \* Defines the models in common.db.sqlalchemy.models.py (this should be factored out into the parallax package soon) \* Adds the parallax api-server under /bin (if PyPI was used to pull python-daemon and python-lockfile, you may need to apply a patch I have against it) * Changes the obj['uri'] to obj['location'] to better sync with the representation within Nova. 
Adds the image\_lookup\_fn = ParallaxAdapter.lookup to teller.server * ImageChunk -> ImageFile, merging APIRouter into API for now * Adding Apache header to test\_data.py * Small cleanups * Parallax will return obj['location'] instead of obj['uri'], also maybe a parallax lookup fn would be nice? * Implements a Parallax adapter for looking up images requested from nova. Adds a size check to SwiftBackend to ensure that the chunks haven't been truncated or anything * Reconciling parallax modifications with modulization of glance * Adding Images controller * Adding API directory and server.py * Modulify the imports * Implements Parallax adapter for lookups from Teller, also adds size expectations to the backend adapters * Adding files from Nova * Makes glance a module, containing teller and parallax sub-modules * libify glance into teller and parallax modules. Make nosetests work by making tests and tests/unit/ into packages * Rearranged the code a little. Added a setup.py. Added sphinx doc skeleton * Added setup.py and sphinx docs * Reorg to make Monty's build pedanticness side happier * Implements Swift backend for teller * ignore all .pyc files * Merging ricks changes * Adding basic image controller and mock backends * Adding description of registry data structure * Adding teller\_server * adding filesystem and http backends * Initial check-in ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/HACKING.rst0000664000175000017500000000142200000000000014653 0ustar00zuulzuul00000000000000glance Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on glance Specific Commandments ---------------------------- - [G316] Change assertTrue(isinstance(A, B)) by optimal assert like assertIsInstance(A, B) - [G317] Change assertEqual(type(A), B) by optimal assert like assertIsInstance(A, B) - [G318] Change assertEqual(A, None) or assertEqual(None, A) by optimal assert like assertIsNone(A) - [G319] Validate that debug level logs are not translated - [G327] Prevent use of deprecated contextlib.nested - [G328] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs - [G330] Log.warn is deprecated. Enforce use of LOG.warning. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/LICENSE0000664000175000017500000002363700000000000014076 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/PKG-INFO0000664000175000017500000000645200000000000014162 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: glance Version: 29.0.0 Summary: OpenStack Image Service Home-page: https://docs.openstack.org/glance/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ================ OpenStack Glance ================ Glance is an OpenStack project that provides services and associated libraries to store, browse, share, distribute and manage bootable disk images, other data closely associated with initializing compute resources, and metadata definitions. Use the following resources to learn more: API --- To learn how to use Glance's API, consult the documentation available online at: * `Image Service APIs `_ Developers ---------- For information on how to contribute to Glance, please see the contents of the CONTRIBUTING.rst in this repository. Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: * `Official Glance documentation `_ * `Official Client documentation `_ Operators --------- To learn how to deploy and configure OpenStack Glance, consult the documentation available online at: * `Openstack Glance `_ In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. You can raise bugs here: * `Bug Tracker `_ Release notes ------------- To learn more about Glance's new features, optimizations, and changes between versions, consult the release notes online at: * `Release Notes `__ Other Information ----------------- During each design summit, we agree on what the whole community wants to focus on for the upcoming release. 
You can see image service plans: * `Image Service Plans `_ For more information about the Glance project please see: * `Glance Project `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/README.rst0000664000175000017500000000364300000000000014553 0ustar00zuulzuul00000000000000================ OpenStack Glance ================ Glance is an OpenStack project that provides services and associated libraries to store, browse, share, distribute and manage bootable disk images, other data closely associated with initializing compute resources, and metadata definitions. Use the following resources to learn more: API --- To learn how to use Glance's API, consult the documentation available online at: * `Image Service APIs `_ Developers ---------- For information on how to contribute to Glance, please see the contents of the CONTRIBUTING.rst in this repository. Any new code must follow the development guidelines detailed in the HACKING.rst file, and pass all unit tests. Further developer focused documentation is available at: * `Official Glance documentation `_ * `Official Client documentation `_ Operators --------- To learn how to deploy and configure OpenStack Glance, consult the documentation available online at: * `Openstack Glance `_ In the unfortunate event that bugs are discovered, they should be reported to the appropriate bug tracker. You can raise bugs here: * `Bug Tracker `_ Release notes ------------- To learn more about Glance's new features, optimizations, and changes between versions, consult the release notes online at: * `Release Notes `__ Other Information ----------------- During each design summit, we agree on what the whole community wants to focus on for the upcoming release. You can see image service plans: * `Image Service Plans `_ For more information about the Glance project please see: * `Glance Project `_ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7742932 glance-29.0.0/api-ref/0000775000175000017500000000000000000000000014401 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7902951 glance-29.0.0/api-ref/source/0000775000175000017500000000000000000000000015701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/conf.py0000664000175000017500000001505600000000000017207 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # glance api-ref build config file, copied from: # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys extensions = [ 'os_api_ref', 'openstackdocstheme', ] html_theme = 'openstackdocs' html_theme_options = { "sidebar_dropdown": "api_ref", "sidebar_mode": "toc", } # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' copyright = '2010-present, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # openstackdocstheme options openstackdocs_repo_name = 'openstack/glance' openstackdocs_bug_project = 'glance' openstackdocs_bug_tag = 'api-ref' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'glancedoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Glance.tex', 'OpenStack Image Service API Documentation', 'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/heading-level-guide.txt0000664000175000017500000000107100000000000022240 0ustar00zuulzuul00000000000000=============== Heading level 1 =============== ReStructured Text doesn't care what markers you use for headings, but it does require you to be consistent. Here's what we are using in the Image API reference documents. Level 1 is mostly used in the .rst files. For the .inc files, the top-level heading will most likely be a Level 2. 
Heading level 2 *************** Heading level 3 ~~~~~~~~~~~~~~~ Heading level 4 --------------- Heading level 5 +++++++++++++++ Heading level 6 ############### Heading level 7 """"""""""""""" Heading level 8 ''''''''''''''' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/index.rst0000664000175000017500000000151500000000000017544 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================== Image Service APIs ================== API content can be searched using the :ref:`search`. .. toctree:: :maxdepth: 2 versions/index v2/index v2/metadefs-index ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7942955 glance-29.0.0/api-ref/source/v2/0000775000175000017500000000000000000000000016230 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/cache-manage-parameters.yaml0000664000175000017500000000134600000000000023552 0ustar00zuulzuul00000000000000cached_images: description: | A list of cached image JSON objects, possibly empty, where each object contains the following fields: ``image_id`` The id of the cached image. ``hits`` The number of cache hits for this image. ``last_accessed`` Epoch time when the cached image was most recently accessed. ``last_modified`` Epoch time when the cached image was installed into the cache. ``size`` Size in bytes of the cached image. in: body required: true type: array queued_images: description: | A list of image ids, possibly empty, of images queued to be cached, listed in the order in which they will be processed. in: body required: true type: array ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/cache-manage.inc0000664000175000017500000000275400000000000021224 0ustar00zuulzuul00000000000000.. -*- rst -*- Cache Manage ************ List and manage the cache. Query cache status ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/cache Lists all images in the cache or queue. *(Since Image API v2.14)* Normal response codes: 200 Error response codes: 400, 401, 403 Request ------- No request parameters. Response Parameters ------------------- .. rest_parameters:: cache-manage-parameters.yaml - cached_images: cached_images - queued_images: queued_images Response Example ---------------- .. literalinclude:: samples/cache-list-response.json :language: json Queue image ~~~~~~~~~~~ .. rest_method:: PUT /v2/cache/{image_id} Queues an image for caching. *(Since Image API v2.14)* Normal response codes: 202 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Delete image from cache ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/cache/{image_id} Deletes an image from the cache.
*(Since Image API v2.14)* Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Clear images from cache ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/cache Clears the cache and its queue. *(Since Image API v2.14)* Normal response codes: 204 Error response codes: 400, 401, 403 Request ------- .. rest_parameters:: images-parameters.yaml - x-image-cache-clear-target: cache-clear-header ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/discovery-parameters.yaml0000664000175000017500000000326000000000000023265 0ustar00zuulzuul00000000000000stores: description: | A list of store objects, where each store object may contain the following fields: ``id`` Operator-defined identifier for the store. ``description`` Operator-supplied description of this store. ``default`` (optional) Only present on the default store. This is the store where image data is placed if you do not indicate a specific store when supplying data to the Image Service. (See the :ref:`Image data ` and :ref:`Interoperable image import ` sections for more information.) ``read-only`` (optional) Included only when the store is read only. in: body required: true type: array stores-detail: description: | A list of store objects, where each store object may contain the following fields: ``id`` Operator-defined identifier for the store. ``type`` The type of the store. ``description`` Operator-supplied description of this store. ``default`` (optional) Only present on the default store. This is the store where image data is placed if you do not indicate a specific store when supplying data to the Image Service. (See the :ref:`Image data ` and :ref:`Interoperable image import ` sections for more information.) ``read-only`` (optional) Included only when the store is read only. ``weight`` (default 0) A positive integer weight used to sort image locations by preference. ``properties`` Contains store-specific properties. in: body required: true type: array ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/discovery.inc0000664000175000017500000000574500000000000020741 0ustar00zuulzuul00000000000000.. -*- rst -*- Image Service Info (Discovery) ****************************** General information ~~~~~~~~~~~~~~~~~~~ These calls allow you to discover useful information about what services you may consume from a particular deployment of the OpenStack Image Service. .. _import-discovery-call: Import methods and values discovery ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/info/import Returns information concerning the constraints around image import in the cloud in which the call is made, for example, supported container formats, supported disk formats, maximum image size, etc. This call contains an ``import-methods`` field consisting of an array of string identifiers indicating what import methods are supported in the cloud in which the call is made. *(Since Image API v2.6)* .. note:: In the Image API v2.6-2.8, this discovery call contains **only** the ``import-methods`` field. Normal response codes: 200 Error response codes: 400, 401, 403 Request ------- There are no request parameters. This call does not allow a request body. Response Parameters ------------------- ..
rest_parameters:: images-parameters.yaml - import-methods: import-methods Response Example ---------------- .. literalinclude:: samples/image-info-import-response.json :language: json .. _store-discovery-call: List stores ~~~~~~~~~~~ .. rest_method:: GET /v2/info/stores Support for multiple store backends was introduced in the Rocky release as part of the EXPERIMENTAL Image API v2.8. In version 2.7 of the API, this call will return a 404 (Not Found). Use the :ref:`API versions call ` to determine what API versions are available in your cloud. Normal response codes: 200 Error response codes: 404 Request ------- There are no request parameters. This call does not allow a request body. Response Parameters ------------------- .. rest_parameters:: discovery-parameters.yaml - stores: stores Response Example ---------------- .. literalinclude:: samples/stores-list-response.json :language: json Quota usage ~~~~~~~~~~~ .. rest_method:: GET /v2/info/usage The user's quota and current usage are displayed, if enabled by server-side configuration. Normal response codes: 200 Request ------- There are no request parameters. This call does not allow a request body. Response Example ---------------- .. literalinclude:: samples/usage-response.json :language: json List stores detail ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/info/stores/detail Lists all the backend stores, with detail. This call is accessible to admins only; for a non-admin user, the API will return a bad request. Normal response codes: 200 Error response codes: 403, 404 Request ------- There are no request parameters. This call does not allow a request body. Response Parameters ------------------- .. rest_parameters:: discovery-parameters.yaml - stores: stores-detail Response Example ---------------- .. literalinclude:: samples/stores-list-detail-response.json :language: json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-data.inc0000664000175000017500000001106600000000000021103 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _image-data: Image data ********** Uploads and downloads raw image data. *These operations may be restricted to administrators. Consult your cloud operator's documentation for details.* Upload binary image data ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/images/{image_id}/file Uploads binary image data. *(Since Image API v2.0)* Set the ``Content-Type`` request header to ``application/octet-stream``. Support for multiple store backends was introduced in the Rocky release as part of the EXPERIMENTAL Image API v2.8. Beginning with API version 2.8, an optional ``X-Image-Meta-Store`` header may be added to the request. When present, the image data will be placed into the backing store whose identifier is the value of this header. If the store identifier specified is not recognized, a 400 (Bad Request) response is returned. When the header is not present, the image data is placed into the default backing store. * Store identifiers are site-specific. Use the :ref:`Store Discovery ` call to determine what stores are available in a particular cloud. * The default store may be determined from the :ref:`Store Discovery ` response. * A default store is always defined, so if you do not have a need to use a particular store, simply omit this header and the default store will be used. * For API versions before version 2.8, this header is silently ignored.
Example call: :: curl -i -X PUT -H "X-Auth-Token: $token" \ -H "X-Image-Meta-Store: {store_identifier}" \ -H "Content-Type: application/octet-stream" \ --data-binary @/home/glance/ubuntu-12.10.qcow2 \ $image_url/v2/images/{image_id}/file **Preconditions** Before you can store binary image data, you must meet the following preconditions: - The image must exist. - You must set the disk and container formats in the image. - The image status must be ``queued``. - Your image storage quota must be sufficient. - The size of the data that you want to store must not exceed the size that the OpenStack Image service allows. **Synchronous Postconditions** - With correct permissions, you can see the image status as ``active`` through API calls. - With correct access, you can see the stored data in the storage system that the OpenStack Image Service manages. **Troubleshooting** - If you cannot store the data, either your request lacks required information or you exceeded your allotted quota. Ensure that you meet the preconditions and run the request again. If the request fails again, review your API request. - The storage back ends for storing the data must have enough free storage space to accommodate the size of the data. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409, 410, 413, 415, 503 Request ------- .. rest_parameters:: images-parameters.yaml - Content-type: Content-Type-data - X-Image-Meta-Store: store-header - image_id: image_id-in-path Download binary image data ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/images/{image_id}/file Downloads binary image data. *(Since Image API v2.0)* Example call: ``curl -i -X GET -H "X-Auth-Token: $token" $image_url/v2/images/{image_id}/file`` The response body contains the raw binary data that represents the actual virtual disk. The ``Content-Type`` header contains the ``application/octet-stream`` value. The ``Content-MD5`` header contains an MD5 checksum of the image data. Use this checksum to verify the integrity of the image data. **Preconditions** - The image must exist. **Synchronous Postconditions** - You can download the binary image data to your machine if the image has image data. - If image data exists, the call returns the HTTP ``200`` response code for a full image download request. - If image data exists, the call returns the HTTP ``206`` response code for a partial download request. - If no image data exists, the call returns the HTTP ``204`` (No Content) response code. - If no image record exists, the call returns the HTTP ``404`` response code for an attempted full image download request. - For an unsatisfiable partial download request, the call returns the HTTP ``416`` response code. Normal response codes: 200, 204, 206 Error response codes: 400, 403, 404, 416 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - Range: Range Response -------- .. rest_parameters:: images-parameters.yaml - Content-Type: Content-Type-data-response - Content-Md5: Content-Md5 - Content-Length: Content-Length - Content-Range: Content-Range ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-images-v2.inc0000664000175000017500000005713500000000000021773 0ustar00zuulzuul00000000000000.. -*- rst -*- Images ****** Creates, lists, shows, updates, deletes, and performs other operations on images.
General information ~~~~~~~~~~~~~~~~~~~ **Images** An *image* is represented by a JSON Object, that is, as a set of key:value pairs. Some of these keys are *base properties* that are managed by the Image service. The remainder are properties put on the image by the operator or the image owner. .. note:: Another common term for "image properties" is "image metadata" because what we're talking about here are properties that *describe* the image data that can be consumed by various OpenStack services (for example, by the Compute service to boot a server, or by the Volume service to create a bootable volume). Here's some important information about image properties: * The base properties are always included in the image representation. A base property that doesn't have a value is displayed with its value set to ``null`` (that is, the JSON null data type). * Additional properties, whose value is always a string data type, are only included in the response if they have a value. * Since version 2.2, the Images API allows an operator to configure *property protections*, by which the create, read, update, and delete operations on specific image properties may be restricted to particular user roles. Consult the documentation of your cloud operator for details. * Arguably the most important properties of an image are its *id*, which uniquely identifies the image, its *status*, which indicates the current situation of the image (which, in turn, indicates what you can do with the image), and its *visibility*, which indicates who has access to the image. * Some properties are used internally by glance and API users are not allowed to set or modify them. Examples of these are *id*, *status*, and anything prefixed with the *os_glance* namespace. .. note:: In addition to image properties, there's usually a data payload that is accessible via the image. In order to give image consumers some guarantees about the data payload (for example, that the data associated with image ``06b73bc7-9d62-4d37-ad95-d4708f37734f`` is the same today as it was when you used it to boot a server yesterday) the Image service controls particular image properties (for example, ``checksum``) that cannot be modified. A shorthand way to refer to the way the image data payload is related to its representation as an *image* in the Images API is to say that "images are immutable". (This obviously applies to the image data payload, not its representation in the Image service.) See the :ref:`Image Data ` section of this document for more information. **Image status** The possible status values for images are presented in the following table. .. list-table:: :header-rows: 1 * - Status - Description * - queued - The Image service reserved an image ID for the image in the catalog but did not yet upload any image data. * - saving - The Image service is in the process of saving the raw data for the image into the backing store. * - active - The image is active and ready for consumption in the Image service. * - killed - An image data upload error occurred. * - deleted - The Image service retains information about the image but the image is no longer available for use. * - pending_delete - Similar to the ``deleted`` status. An image in this state is not recoverable. * - deactivated - The image data is not available for use. * - uploading - Data has been staged as part of the interoperable image import process. It is not yet available for use. 
*(Since Image API 2.6)* * - importing - The image data is being processed as part of the interoperable image import process, but is not yet available for use. *(Since Image API 2.6)* **Image visibility** The possible values for image visibility are presented in the following table. .. list-table:: :header-rows: 1 * - Visibility - Description * - ``public`` - Any user may read the image and its data payload. Additionally, the image appears in the default image list of all users. * - ``community`` - Any user may read the image and its data payload, but the image does *not* appear in the default image list of any user other than the owner. *(This visibility value was added in the Image API v2.5)* * - ``shared`` - An image must have this visibility in order for *image members* to be added to it. Only the owner and the specific image members who have been added to the image may read the image or its data payload. The image appears in the default image list of the owner. It also appears in the default image list of members who have *accepted* the image. See the :ref:`Image Sharing <image-sharing>` section of this document for more information. If you do not specify a visibility value when you create an image, it is assigned this visibility by default. Non-owners, however, will not have access to the image until they are added as image members. *(This visibility value was added in the Image API v2.5)* * - ``private`` - Only the image owner may read the image or its data payload. Additionally, the image appears in the owner's default image list. *Since Image API v2.5, an image with private visibility cannot have members added to it.* Note that the descriptions above discuss *read* access to images. Only the image owner (or an administrator) has write access to image properties and the image data payload. Further, in order to promise image immutability, the Image service will allow even the owner (or an administrator) only write-once permissions to specific image properties and the image data payload. .. _image-create: Create image ~~~~~~~~~~~~ .. rest_method:: POST /v2/images Creates a catalog record for an operating system disk image. *(Since Image API v2.0)* The ``Location`` response header contains the URI for the image. Support for multiple store backends was introduced in the Rocky release as part of the EXPERIMENTAL Image API v2.8. Since Image API v2.8, a new header, ``OpenStack-image-store-ids``, which contains the list of available stores, is included in the response. This header is only included if multiple backend stores are supported. The response body contains the new image entity. Synchronous Postconditions - With correct permissions, you can see the image status as ``queued`` through API calls. Normal response codes: 201 Error response codes: 400, 401, 403, 409, 413, 415 Request ------- .. rest_parameters:: images-parameters.yaml - container_format: container_format-in-request - disk_format: disk_format-in-request - id: id-in-request - min_disk: min_disk-in-request - min_ram: min_ram-in-request - name: name-in-request - protected: protected-in-request - tags: tags-in-request - visibility: visibility-in-request You may also include additional properties specified as key:value pairs, where the value must be a string data type. Keys are limited to 255 chars in length. Available key names may be limited by the cloud's property protection configuration and reserved namespaces like *os_glance*. Request Example --------------- ..
literalinclude:: samples/image-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - Location: Location - OpenStack-image-import-methods: import-header - OpenStack-image-store-ids: stores-header - checksum: checksum - container_format: container_format - created_at: created_at - disk_format: disk_format - file: file - id: id - min_disk: min_disk - min_ram: min_ram - name: name - os_hash_algo: os_hash_algo - os_hash_value: os_hash_value - os_hidden: os_hidden - owner: owner - protected: protected - schema: schema-image - self: self - size: size - status: status - tags: tags - updated_at: updated_at - virtual_size: virtual_size - visibility: visibility - direct_url: direct_url - locations: locations The response may also include additional properties specified as key:value pairs if additional properties were specified in the request. Response Example ---------------- .. literalinclude:: samples/image-create-response.json :language: json Show image ~~~~~~~~~~ .. rest_method:: GET /v2/images/{image_id} Shows details for an image. *(Since Image API v2.0)* The response body contains a single image entity. Preconditions - The image must exist. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - checksum: checksum - container_format: container_format - created_at: created_at - disk_format: disk_format - file: file - id: id - min_disk: min_disk - min_ram: min_ram - name: name - os_hash_algo: os_hash_algo - os_hash_value: os_hash_value - os_hidden: os_hidden - owner: owner - protected: protected - schema: schema-image - self: self - size: size - status: status - tags: tags - updated_at: updated_at - virtual_size: virtual_size - visibility: visibility - direct_url: direct_url - locations: locations The response may also include additional properties specified as key:value pairs if such properties have been added to the image by the owner or an administrator. Response Example ---------------- .. literalinclude:: samples/image-show-response.json :language: json Show tasks associated with image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/images/{image_id}/tasks Shows tasks associated with an image. *(Since Image API v2.12)* The response body contains list of tasks, possibly empty, associated with the specified image. Preconditions - The image must exist. Normal response codes: 200 Error response codes: 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - tasks: tasks Response Example ---------------- .. literalinclude:: samples/image-tasks-show-response.json :language: json List images ~~~~~~~~~~~ .. rest_method:: GET /v2/images Lists public virtual machine (VM) images. *(Since Image API v2.0)* **Pagination** Returns a subset of the larger collection of images and a link that you can use to get the next set of images. You should always check for the presence of a ``next`` link and use it as the URI in a subsequent HTTP GET request. You should follow this pattern until a ``next`` link is no longer provided. The ``next`` link preserves any query parameters that you send in your initial request. You can use the ``first`` link to jump back to the first page of the collection. 
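For example, a client might walk through the image list by requesting a small page and then following the ``next`` link from each response, as sketched below (the ``limit`` value and the marker UUID are illustrative only; in practice the second request is simply the ``next`` URI returned by the first response):

.. code-block:: console

   GET /v2/images?limit=2
   GET /v2/images?limit=2&marker=2e011209-660f-44b5-baf2-2eb4babae53d
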
If you prefer to paginate through images manually, use the ``limit`` and ``marker`` parameters. **Query Filters** The list operation accepts query parameters to filter the response. A client can provide direct comparison filters by using most image attributes, such as ``name=Ubuntu``, ``visibility=public``, and so on. To filter using image tags, use the filter ``tag`` (note the singular). To filter on multiple tags, include each tag separately in the query. For example, to find images with the tag **ready**, include ``tag=ready`` in your query string. To find images tagged with **ready** and **approved**, include ``tag=ready&tag=approved`` in your query string. (Note that only images containing *both* tags will be included in the response.) A client cannot use any ``link`` in the json-schema, such as self, file, or schema, to filter the response. You can list VM images that have a status of ``active``, ``queued``, or ``saving``. **The** ``in`` **Operator** As a convenience, you may specify several values for any of the following fields by using the ``in`` operator: * container_format * disk_format * id * name * status For most of these, usage is straightforward. For example, to list images in queued or saving status, use: ``GET /v2/images?status=in:saving,queued`` To find images in a particular list of image IDs, use: ``GET /v2/images?id=in:3afb79c1-131a-4c38-a87c-bc4b801d14e6,2e011209-660f-44b5-baf2-2eb4babae53d`` Using the ``in`` operator with the ``name`` property of images can be a bit trickier, depending upon how creatively you have named your images. The general rule is that if an image name contains a comma (``,``), you must enclose the entire name in quotation marks (``"``). As usual, you must URL encode any characters that require it. For example, to find images named ``glass, darkly`` or ``share me``, you would use the following filter specification: ``GET v2/images?name=in:"glass,%20darkly",share%20me`` As with regular filtering by name, you must specify the complete name you are looking for. Thus, for example, the query string ``name=in:glass,share`` will only match images with the exact name ``glass`` or the exact name ``share``. It will not find an image named ``glass, darkly`` or an image named ``share me``. **Size Comparison Filters** You can use the ``size_min`` and ``size_max`` query parameters to filter images by image size. The size, in bytes, is the size of the image data on disk. For example, to filter the response to include only images that are from 1 to 4 MB, set the ``size_min`` query parameter to ``1048576`` and the ``size_max`` query parameter to ``4194304``. .. _v2-comparison-ops: **Time Comparison Filters** You can use a *comparison operator* along with the ``created_at`` or ``updated_at`` fields to filter your results. Specify the operator first, a colon (``:``) as a separator, and then the time in `ISO 8601 Format <https://en.wikipedia.org/wiki/ISO_8601>`_. Available comparison operators are: .. list-table:: :header-rows: 1 * - Operator - Description * - ``gt`` - Return results more recent than the specified time. * - ``gte`` - Return any results matching the specified time and also any more recent results. * - ``eq`` - Return any results matching the specified time exactly. * - ``neq`` - Return any results that do not match the specified time. * - ``lt`` - Return results older than the specified time. * - ``lte`` - Return any results matching the specified time and also any older results. For example: ..
code-block:: console GET v2/images?created_at=gt:2016-04-18T21:38:54Z **Sorting** You can use query parameters to sort the results of this operation. - ``sort_key``. Sorts by an image attribute. Sorts in the natural sorting direction of the image attribute. - ``sort_dir``. Sorts in a sort direction. - ``sort``. Sorts by one or more sets of attribute and sort direction combinations. If you omit the sort direction in a set, the default is ``desc``. To sort the response, use the ``sort_key`` and ``sort_dir`` query parameters: .. code-block:: console GET /v2/images?sort_key=name&sort_dir=asc&sort_key=status&sort_dir=desc Alternatively, specify the ``sort`` query parameter: .. code-block:: console GET /v2/images?sort=name:asc,status:desc .. note:: Although this call has been available since version 2.0 of this API, it has been enhanced from release to release. The filtering and sorting functionality and syntax described above apply to the most recent release (Newton). Not everything described above will be available in prior releases. Normal response codes: 200 Error response codes: 400, 401, 403 Request ------- .. rest_parameters:: images-parameters.yaml - limit: limit - marker: marker - name: name-in-query - owner: owner-in-query - protected: protected-in-query - status: status-in-query - tag: tag-in-query - visibility: visibility-in-query - os_hidden: os_hidden-in-query - member_status: member_status-in-query - size_max: size_max - size_min: size_min - created_at: created_at-in-query - updated_at: updated_at-in-query - sort_dir: sort_dir - sort_key: sort_key - sort: sort Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - images: images - first: first - next: next - schema: schema-images Response Example ---------------- .. literalinclude:: samples/images-list-response.json :language: json .. _v2-image-update: Update image ~~~~~~~~~~~~ .. rest_method:: PATCH /v2/images/{image_id} Updates an image. *(Since Image API v2.0)* Conceptually, you update an image record by patching the JSON representation of the image, passing a request body conforming to one of the following media types: - ``application/openstack-images-v2.0-json-patch`` *(deprecated)* - ``application/openstack-images-v2.1-json-patch`` *(since Image API v2.1)* Attempting to make a PATCH call using some other media type will provoke a response code of 415 (Unsupported media type). The ``application/openstack-images-v2.1-json-patch`` media type provides a useful and compatible subset of the functionality defined in JavaScript Object Notation (JSON) Patch `RFC6902 `_, which defines the ``application/json-patch+json`` media type. .. note:: The ``application/openstack-images-v2.0-json-patch`` media type is based on draft 4 of the standard. Its use is deprecated. For information about the PATCH method and the available media types, see `Image API v2 HTTP PATCH media types `_. Attempting to modify some image properties will cause the entire request to fail with a 403 (Forbidden) response code: - An attempt to modify any of the "base" image properties that are managed by the Image Service. These are the properties specified as read only in the :ref:`Image Schema `. - An attempt to create or modify image properties for which you do not have permission to do so *(since Image API v2.2)*. This depends upon how property protections are configured in the OpenStack cloud in which you are making the call. Consult your cloud's documentation for details. 
- An attempt to delete the only image location, or to replace the image locations with an empty list *(since Image API v2.4)*. - An attempt to set or modify a property with a reserved name, such as anything prefixed with the *os_glance* namespace. Attempting to add a location path to an image that is not in ``queued`` or ``active`` state will result in a 409 (Conflict) response code *(since Image API v2.4)*. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409, 413, 415 Request ------- .. rest_parameters:: images-parameters.yaml - Content-Type: Content-Type-patch - image_id: image_id-in-path The request body must conform to the ``application/openstack-images-v2.1-json-patch`` media type definition (see above). Request Example --------------- .. literalinclude:: samples/image-update-request.json :language: json Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - checksum: checksum - container_format: container_format - created_at: created_at - disk_format: disk_format - file: file - id: id - min_disk: min_disk - min_ram: min_ram - name: name - owner: owner - os_hash_algo: os_hash_algo - os_hash_value: os_hash_value - os_hidden: os_hidden - protected: protected - schema: schema-image - self: self - size: size - status: status - tags: tags - updated_at: updated_at - visibility: visibility - direct_url: direct_url - locations: locations Response Example ---------------- .. literalinclude:: samples/image-update-response.json :language: json Delete image ~~~~~~~~~~~~ .. rest_method:: DELETE /v2/images/{image_id} (Since Image API v2.0) Deletes an image. You cannot delete images with the ``protected`` attribute set to ``true`` (boolean). Preconditions - You can delete an image in any status except ``deleted``. - The ``protected`` attribute of the image cannot be ``true``. - You have permission to perform image deletion under the configured image deletion policy. Synchronous Postconditions - The response is empty and returns the HTTP ``204`` response code. - The API deletes the image from the images index. - If the image has associated binary image data in the storage backend, the OpenStack Image service deletes the data. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Deactivate image ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/images/{image_id}/actions/deactivate Deactivates an image. *(Since Image API v2.3)* By default, this operation is restricted to administrators only. If you try to download a deactivated image, you will receive a 403 (Forbidden) response code. Additionally, only administrative users can view image locations for deactivated images. The deactivate operation returns an error if the image status is not ``active`` or ``deactivated``. Preconditions - The image must exist. Normal response codes: 204 Error response codes: 400, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Reactivate image ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/images/{image_id}/actions/reactivate Reactivates an image. *(Since Image API v2.3)* By default, this operation is restricted to administrators only. The reactivate operation returns an error if the image status is not ``active`` or ``deactivated``. Preconditions - The image must exist. Normal response codes: 204 Error response codes: 400, 403, 404 Request ------- .. 
rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Add Location ~~~~~~~~~~~~ .. rest_method:: POST /v2/images/{image_id}/locations Adds a location to an image which is in ``queued`` state. Accepts a location ``url`` and optional ``validation_data`` in the JSON request body. Adding a location to an image is only allowed for the owner or a user with the service role. If these conditions are not met, a 403 (Forbidden) will be returned. Attempting to add a location path to an image that is not in ``queued`` state will result in a 409 (Conflict) response code. Attempting to provide an incorrect hash value in the validation data (in the case of the http store) will result in a 400 (Bad Request) response code. Normal response codes: 200 Error response codes: 400, 403, 404, 409 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - url: locations-url - validation_data: validation-data Request Example --------------- .. literalinclude:: samples/add-location-request.json :language: json Get Location ~~~~~~~~~~~~ .. rest_method:: GET /v2/images/{image_id}/locations Lists all locations associated with an image, with the location url and store id. This call is accessible only to the service user; for non-service users, the API returns a 403 (Forbidden) response. Normal response codes: 200 Error response codes: 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path This call does not allow a request body. Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - locations: locations Response Example ---------------- .. literalinclude:: samples/locations-list-detail-response.json :language: json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-import.inc0000664000175000017500000003211400000000000021501 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _image-import-process: Interoperable image import ************************** An interoperable image import process is introduced in the Image API v2.6. Use the :ref:`API versions call ` to determine what API versions are available in your cloud. General information ~~~~~~~~~~~~~~~~~~~ The exact workflow you use for interoperable image import depends upon the import methods available in the cloud in which you want to import an image. Each of these methods is well defined (which is what makes this process interoperable among different OpenStack clouds). Four import methods are defined: * ``glance-direct`` * ``web-download`` * ``copy-image`` * ``glance-download`` .. note:: Use the :ref:`Import Method Discovery <import-discovery-call>` call to determine what import methods are available in the cloud to which you wish to import an image. The first step in each interoperable image import method is the same: you must create an image record. This will give you an image id to work with. This image id is how the OpenStack Image service will understand that the other calls you make are referring to this particular image. Thus, the first step is: 1. Create an image record using the :ref:`Image Create <image-create>` API call. You must do this first so that you have an image id to work with for the other calls. In a cloud in which interoperable image import is enabled, the :ref:`Image Create <image-create>` response will include an ``OpenStack-image-import-methods`` header listing the types of import methods available in that cloud. Alternatively, these methods may be determined independently of creating an image by making the :ref:`Import Method Discovery <import-discovery-call>` call.
In a cloud in which multiple storage backends are available, the :ref:`Image Create <image-create>` response will include an ``OpenStack-image-store-ids`` header listing the stores available in that cloud. Alternatively, these stores may be determined independently of creating an image by making the :ref:`Stores Discovery <store-discovery-call>` call. The glance-direct import method ------------------------------- The ``glance-direct`` workflow has **three** parts: 1. Create an image record as described above. 2. Upload the image data to a staging area using the :ref:`Image Stage <image-stage-call>` API call. Note that this image data is not accessible until after the third step has successfully completed. 3. Issue the :ref:`Image Import <image-import-call>` call to complete the import process. You will specify that you are using the ``glance-direct`` import method in the body of the import call. The web-download import method ------------------------------ The ``web-download`` workflow has **two** parts: 1. Create an image record as described above. 2. Issue the :ref:`Image Import <image-import-call>` call to complete the import process. You will specify that you are using the ``web-download`` import method in the body of the import call. The copy-image import method ---------------------------- The ``copy-image`` workflow has **two** parts: 1. Identify the existing image whose data you want to copy to other stores. 2. Issue the :ref:`Image Import <image-import-call>` call to complete the import process. You will specify that you are using the ``copy-image`` import method in the body of the import call. The glance-download import method --------------------------------- The ``glance-download`` workflow has **two** parts: 1. Create an image record as described above. 2. Issue the :ref:`Image Import <image-import-call>` call to complete the import process. You will specify that you are using the ``glance-download`` import method in the body of the import call. .. _image-stage-call: Stage binary image data ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/images/{image_id}/stage Places the binary image data in a staging area. It is not stored in the storage backend and is not accessible for download until after the :ref:`Image Import <image-import-call>` call is made. *(Since Image API v2.6)* Set the ``Content-Type`` request header to ``application/octet-stream``. Example call: :: curl -i -X PUT -H "X-Auth-Token: $token" \ -H "Content-Type: application/octet-stream" \ --data-binary @/home/glance/my.to-import.qcow2 \ $image_url/v2/images/{image_id}/stage **Preconditions** Before you can stage binary image data, you must meet the following preconditions: - The image record must exist. - The image status must be ``queued``. - Your image storage quota must be sufficient. - The size of the data that you want to store must not exceed the size that the OpenStack Image service allows. **Synchronous Postconditions** - With correct permissions, you can see the image status as ``uploading`` through API calls. **Troubleshooting** - If you cannot store the data, either your request lacks required information or you exceeded your allotted quota. Ensure that you meet the preconditions and run the request again. If the request fails again, review your API request. - The storage back ends for storing the data must have enough free storage space to accommodate the size of the data. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 405, 409, 410, 413, 415, 503 If the image import process is not enabled in your cloud, this request will result in a 404 response code with an appropriate message. Request ------- ..
rest_parameters:: images-parameters.yaml - Content-type: Content-Type-data - image_id: image_id-in-path .. _image-import-call: Import an image ~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/images/{image_id}/import Signals the Image Service to complete the image import workflow by processing data that has been made available to the OpenStack image service. *(Since Image API v2.6)* In the ``glance-direct`` workflow, the data has been made available to the Image service via the :ref:`Stage binary image data <image-stage-call>` API call. In the ``web-download`` workflow, the data is made available to the Image service by being posted to an accessible location with a URL that you know. In the ``copy-image`` workflow, the data is made available to the Image service by copying existing image data to the staging area. In the ``glance-download`` workflow, the data is made available to the Image service by fetching an image accessible from another glance service specified by a region name and an image id that you know. Beginning with API version 2.8, an optional ``stores`` parameter may be added to the request body. When present, it contains the list of backing store identifiers to import the image binary data to. If at least one store identifier specified is not recognized, a 409 (Conflict) response is returned. When the parameter is not present, the image data is placed into the default backing store. * Store identifiers are site-specific. Use the :ref:`Store Discovery <store-discovery-call>` call to determine what stores are available in a particular cloud. * The default store may be determined from the :ref:`Store Discovery <store-discovery-call>` response. * A default store is always defined, so if you do not have a need to use particular stores, simply omit this parameter and the default store will be used. * For API versions before version 2.8, this parameter is silently ignored. For backwards compatibility, if the ``stores`` parameter is not specified, the ``X-Image-Meta-Store`` header is evaluated. To import the data into the entire set of stores you may consume from this particular deployment of Glance without specifying each one of them, you can use the optional boolean body parameter ``all_stores``. Note that this can't be used simultaneously with the ``stores`` parameter. To set the behavior of the import workflow in case of error, you can use the optional boolean body parameter ``all_stores_must_succeed``. When set to True (default), if an error occurs during the upload in at least one store, the workflow fails, the data is deleted from stores where copying is done and the state of the image remains unchanged. When set to False, the workflow will fail only if the upload fails on all stores specified. In case of a partial success, the locations added to the image will be the stores where the data has been correctly uploaded. The JSON request body specifies what import method you wish to use for this image request. **Preconditions** Before you can complete the interoperable image import workflow, you must meet the following preconditions: - The image record must exist. - You must set the disk and container formats in the image record. (This can be done at the time of image creation, or you can make the :ref:`Image Update <v2-image-update>` API call.) - Your image storage quota must be sufficient. - The size of the data that you want to store must not exceed the size that the OpenStack Image service allows. **Additional Preconditions** If you are using the ``glance-direct`` import method: - The image status must be ``uploading``.
(This indicates that the image data has been uploaded to the stage.) - The body of your request must indicate that you are using the ``glance-direct`` import method. If you are using the ``web-download`` import method: - The image status must be ``queued``. (This indicates that no image data has yet been associated with the image.) - The body of your request must indicate that you are using the ``web-download`` import method, and it must contain the URL at which the data is to be found. .. note:: The acceptable set of URLs for the ``web-download`` import method may be restricted in a particular cloud. Consult the cloud's local documentation for details. If you are using the ``copy-image`` import method: - The image status must be ``active``. (This indicates that image data is associated with the image.) - The body of your request must indicate that you are using the ``copy-image`` import method, and it must contain either the list of stores where you want to copy your image, or ``all_stores``, which will copy the image to all available stores set in ``glance-api.conf`` using the ``enabled_backends`` configuration option. - If the body of your request contains ``all_stores_must_succeed`` (which defaults to True) and an error occurs during the copying in at least one store, the request will be rejected, the data will be deleted from the new stores where copying is done (not staging), and the state of the image remains the same. - If the body of your request contains ``all_stores_must_succeed`` set to False and an error occurs, then the request will fail (data deleted from stores, ...) only if the copying fails on all stores specified by the user. In case of a partial success, the locations added to the image will be the stores where the data has been correctly uploaded. - By default, you may perform the copy-image operation only on images that you own. This action is governed by policy, so some users may be granted permission to copy unowned images. Consult your cloud's local documentation for details. If you are using the ``glance-download`` import method: - The image status must be ``queued``. (This indicates that no image data has yet been associated with the image.) - The body of your request must indicate that you are using the ``glance-download`` import method, and it must contain the name of the remote OpenStack region and the image id to fetch. You may optionally set the service interface name (``public`` by default) to use for the request. **Synchronous Postconditions** - With correct permissions, you can see the image status as ``importing`` (only for glance-direct, web-download and glance-download import methods) through API calls. (Be aware, however, that if the import process completes before you make the API call, the image may already show as ``active``.) Normal response codes: 202 Error response codes: 400, 401, 403, 404, 405, 409, 410, 413, 415, 503 If the image import process is not enabled in your cloud, this request will result in a 404 response code with an appropriate message. Request ------- .. rest_parameters:: images-parameters.yaml - Content-type: Content-Type-json - X-Image-Meta-Store: store-header - image_id: image_id-in-path - method: method-in-request - all_stores: all-stores-in-request - all_stores_must_succeed: all-stores-succeed-in-request - stores: stores-in-request Request Example - glance-direct import method --------------------------------------------- ..
literalinclude:: samples/image-import-g-d-request.json :language: json Request Example - web-download import method -------------------------------------------- .. literalinclude:: samples/image-import-w-d-request.json :language: json Request Example - copy-image import method -------------------------------------------- .. literalinclude:: samples/image-import-c-i-request.json :language: json Request Example - glance-download import method ----------------------------------------------- .. literalinclude:: samples/image-import-gd-request.json :language: json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-parameters-descriptions.inc0000664000175000017500000000341000000000000025033 0ustar00zuulzuul00000000000000.. |p-start| raw:: html
   <p>
.. |p-end| raw:: html
   </p>
.. |html-br| raw:: html

   <br />

.. |disk_format_description| replace:: |p-start|\ The format of the disk.\ |p-end| |p-start|\ Values may vary based on the configuration available in a particular OpenStack cloud. See the :ref:`Image Schema ` response from the cloud itself for the valid values available. See `Disk Format `__ in the Glance documentation for more information.\ |p-end| |p-start|\ Example formats are: ``ami``, ``ari``, ``aki``, ``vhd``, ``vhdx``, ``vmdk``, ``raw``, ``qcow2``, ``vdi``, ``ploop`` or ``iso``.\ |p-end| |p-start|\ The value might be ``null`` (JSON null data type).\ |p-end| |p-start|\ **Newton changes**: The ``vhdx`` disk format is a supported value.\ |html-br| **Ocata changes**: The ``ploop`` disk format is a supported value.\ |p-end| .. |container_format_description| replace:: |p-start|\ Format of the image container.\ |p-end| |p-start|\ Values may vary based on the configuration available in a particular OpenStack cloud. See the :ref:`Image Schema ` response from the cloud itself for the valid values available. See `Container Format `__ in the Glance documentation for more information.\ |p-end| |p-start|\ Example formats are: ``ami``, ``ari``, ``aki``, ``bare``, ``ovf``, ``ova``, ``docker``, or ``compressed``.\ |p-end| |p-start|\ The value might be ``null`` (JSON null data type).\ |p-end| |p-start|\ **Train changes**: The ``compressed`` container format is a supported value.\ |p-end| ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-parameters.yaml0000664000175000017500000005237300000000000022534 0ustar00zuulzuul00000000000000# variables in header cache-clear-header: description: | A keyword indicating 'cache', 'queue' or empty string to indicate the delete API to delete images from cache or queue or delete from both. If this header is missing then all cached and queued images for caching will be deleted. in: header required: false type: string Content-Length: description: | The length of the body in octets (8-bit bytes) in: header required: true type: string Content-Md5: description: | The MD5 checksum of the body. in: header required: true type: string Content-Range: description: | The content range of image data. For details, see `Hypertext Transfer Protocol (HTTP/1.1): Range Requests `_. in: header required: false type: string Content-Type-data: description: | The media type descriptor for the request body. Use ``application/octet-stream`` in: header required: true type: string Content-Type-data-response: description: | The media type descriptor of the response body, namely ``application/octet-stream`` in: header required: true type: string Content-Type-json: description: | The media type descriptor for the request body. Use ``application/json``. in: header required: true type: string Content-Type-patch: description: | The media type descriptor for the request body. Use ``application/openstack-images-v2.1-json-patch``. (You can also use ``application/openstack-images-v2.0-json-patch``, but keep in mind that it's deprecated.) in: header required: true type: string import-header: description: | A comma separated list of import method identifiers. Included only if image import is enabled in your cloud. *Since Image API v2.6* in: header required: false type: string Location: description: | The URL to access the image file from the external store. in: header required: true type: string Range: description: | The range of image data requested. Note that multi range requests are not supported. 
For details, see `Hypertext Transfer Protocol (HTTP/1.1): Range Requests `_. in: header required: false type: string store-header: description: | A store identifier to upload or import image data. Should only be included when making a request to a cloud that supports multiple backing stores. Use the :ref:`Store Discovery ` call to determine an appropriate store identifier. Simply omit this header to use the default store. *(Since Image API v2.8)* in: header required: false type: string stores-header: description: | A comma separated list of available store identifiers. If this header is missing the cloud does not support multiple backend stores. in: header required: false type: string # variables in path image_id-in-path: description: | The UUID of the image. in: path required: true type: string member_id-in-path: description: | The ID of the image member. An image member is usually the project (also called the "tenant") with whom the image is shared. in: path required: true type: string store-in-path: description: | The ID of the store from which image is to be deleted. in: path required: true type: string tag-in-path: description: | The image tag. A tag is limited to 255 chars in length. You may wish to use characters that can easily be written in a URL. in: path required: true type: string # variables in query created_at-in-query: description: | Specify a *comparison filter* based on the date and time when the resource was created. (See :ref:`Time Comparison Filters `). The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. in: query required: false type: string limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string member_status-in-query: description: | Filters the response by a member status. A valid value is ``accepted``, ``pending``, ``rejected``, or ``all``. Default is ``accepted``. in: query required: false type: string name-in-query: description: | Filters the response by a name, as a string. A valid value is the name of an image. in: query required: false type: string os_hidden-in-query: description: | When ``true``, filters the response to display only "hidden" images. By default, "hidden" images are not included in the image-list response. *(Since Image API v2.7)* in: query required: false type: boolean owner-in-query: description: | Filters the response by a project (also called a "tenant") ID. Shows only images that are shared with you by the specified owner. in: query required: false type: string protected-in-query: description: | Filters the response by the 'protected' image property. A valid value is one of 'true', 'false' (must be all lowercase). Any other value will result in a 400 response. in: query required: false type: boolean size_max: description: | Filters the response by a maximum image size, in bytes. 
in: query required: false type: string size_min: description: | Filters the response by a minimum image size, in bytes. in: query required: false type: string sort: description: | Sorts the response by one or more attribute and sort direction combinations. You can also set multiple sort keys and directions. Default direction is ``desc``. Use the comma (``,``) character to separate multiple values. For example: .. code-block:: none GET /v2/images?sort=name:asc,status:desc in: query required: false type: string sort_dir: description: | Sorts the response by a set of one or more sort direction and attribute (``sort_key``) combinations. A valid value for the sort direction is ``asc`` (ascending) or ``desc`` (descending). If you omit the sort direction in a set, the default is ``desc``. in: query required: false type: string sort_key: description: | Sorts the response by an attribute, such as ``name``, ``id``, or ``updated_at``. Default is ``created_at``. The API uses the natural sorting direction of the ``sort_key`` image attribute. in: query required: false type: string status-in-query: description: | Filters the response by an image status. in: query required: false type: string tag-in-query: description: | Filters the response by the specified tag value. May be repeated, but keep in mind that you're making a conjunctive query, so only images containing *all* the tags specified will appear in the response. in: query required: false type: string updated_at-in-query: description: | Specify a *comparison filter* based on the date and time when the resource was most recently modified. (See :ref:`Time Comparison Filters <v2-comparison-ops>`). The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. in: query required: false type: string visibility-in-query: description: | Filters the response by an image visibility value. A valid value is ``public``, ``private``, ``community``, ``shared``, or ``all``. (Note that if you filter on ``shared``, the images included in the response will only be those where your member status is ``accepted`` unless you explicitly include a ``member_status`` filter in the request.) If you omit this parameter, the response shows ``public``, ``private``, and those ``shared`` images with a member status of ``accepted``. in: query required: false type: string # variables in body all-stores-in-request: description: | When set to True, the data will be imported to the set of stores you may consume from this particular deployment of Glance (i.e., the same set of stores returned by a call to /v2/info/stores on the glance-api service the request hits). This can't be used simultaneously with the ``stores`` parameter. in: body required: false type: boolean all-stores-succeed-in-request: description: | A boolean parameter indicating the behavior of the import workflow when an error occurs. When set to True (default), if an error occurs during the upload in at least one store, the workflow fails, the data is deleted from stores where copying is done (not staging), and the state of the image is unchanged. When set to False, the workflow will fail (data deleted from stores, ...) only if the import fails on all stores specified by the user. In case of a partial success, the locations added to the image will be the stores where the data has been correctly uploaded. Default is True.
in: body required: false type: boolean checksum: description: | An MD5 hash over the image data. The value might be ``null`` (JSON null data type), as this field is no longer populated by the Image Service beginning with the Victoria release. It remains present for backward compatibility with legacy images. To validate image data, instead use the secure multihash fields ``os_hash_algo`` and ``os_hash_value``. in: body required: true type: string container_format: description: | |container_format_description| in: body required: true type: enum container_format-in-request: description: | |container_format_description| in: body required: false type: enum created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string direct_url: description: | The URL to access the image file kept in external store. *It is present only if the* ``show_image_direct_url`` *option is* ``true`` *in the Image service's configuration file.* **Because it presents a security risk, this option is disabled by default.** in: body required: false type: string disk_format: description: | |disk_format_description| in: body required: true type: enum disk_format-in-request: description: | |disk_format_description| in: body required: false type: enum file: description: | The URL for the virtual machine image file. in: body required: true type: string first: description: | The URI for the first page of response. in: body required: true type: string id: description: | A unique, user-defined image UUID, in the format: :: nnnnnnnn-nnnn-nnnn-nnnn-nnnnnnnnnnnn Where **n** is a hexadecimal digit from 0 to f, or F. For example: :: b2173dd3-7ad6-4362-baa6-a68bce3565cb If you omit this value, the API generates a UUID for the image. in: body required: true type: string id-in-request: description: | A unique, user-defined image UUID, in the format: :: nnnnnnnn-nnnn-nnnn-nnnn-nnnnnnnnnnnn Where **n** is a hexadecimal digit from 0 to f, or F. For example: :: b2173dd3-7ad6-4362-baa6-a68bce3565cb If you omit this value, the API generates a UUID for the image. If you specify a value that has already been assigned, the request fails with a ``409`` response code. in: body required: false type: string image_id-in-body: description: | The UUID of the image. in: body required: true type: string images: description: | A list of *image* objects, as described by the :ref:`Images Schema `. in: body required: true type: array import-methods: description: | A JSON object containing a ``value`` element, which is an array of string identifiers indicating what import methods are available in the cloud in which the call is made. This list may be empty. in: body required: true type: object locations: description: | A list of objects, each of which describes an image location. Each object contains a ``url`` key, whose value is a URL specifying a location, and a ``metadata`` key, whose value is a dict of key:value pairs containing information appropriate to the use of whatever external store is indicated by the URL. 
*This list appears only if the* ``show_multiple_locations`` *option is set to* ``true`` *in the Image service's configuration file.* **Because it presents a security risk, this option is disabled by default.** in: body required: false type: array locations-url: description: | The URL of the new location to be added in the image. in: body required: true type: string member_id: description: | The ID of the image member. An image member is usually a project (also called the "tenant") with whom the image is shared. in: body required: true type: string member_status: description: | The status of this image member. Value is one of ``pending``, ``accepted``, ``rejected``. in: body required: true type: string members: description: | A list of *member* objects, as described by the :ref:`Image Members Schema `. Each *member* object describes a member with whom this image is being shared. in: body required: true type: array method-in-request: description: | A JSON object indicating what import method you wish to use to import your image. The content of this JSON object is another JSON object with a ``name`` field whose value is the identifier for the import method. in: body required: true type: object min_disk: description: | Amount of disk space in GB that is required to boot the image. The value might be ``null`` (JSON null data type). in: body required: true type: integer min_disk-in-request: description: | Amount of disk space in GB that is required to boot the image. in: body required: false type: integer min_ram: description: | Amount of RAM in MB that is required to boot the image. The value might be ``null`` (JSON null data type). in: body required: true type: integer min_ram-in-request: description: | Amount of RAM in MB that is required to boot the image. in: body required: false type: integer name: description: | The name of the image. Value might be ``null`` (JSON null data type). in: body required: true type: string name-in-request: description: | The name of the image. in: body required: false type: string next: description: | The URI for the next page of response. Will not be present on the last page of the response. in: body required: true type: string os_hash_algo: description: | The algorithm used to compute a secure hash of the image data for this image. The result of the computation is displayed as the value of the ``os_hash_value`` property. The value might be ``null`` (JSON null data type). The algorithm used is chosen by the cloud operator; it may not be configured by end users. *(Since Image API v2.7)* in: body required: true type: string os_hash_value: description: | The hexdigest of the secure hash of the image data computed using the algorithm whose name is the value of the ``os_hash_algo`` property. The value might be ``null`` (JSON null data type) if data has not yet been associated with this image, or if the image was created using a version of the Image Service API prior to version 2.7. *(Since Image API v2.7)* in: body required: true type: string os_hidden: description: | This field controls whether an image is displayed in the default image-list response. A "hidden" image is out of date somehow (for example, it may not have the latest updates applied) and hence should not be a user's first choice, but it's not deleted because it may be needed for server rebuilds. By hiding it from the default image list, it's easier for end users to find and use a more up-to-date version of this image. 
*(Since Image API v2.7)* in: body required: true type: boolean owner: description: | An identifier for the owner of the image, usually the project (also called the "tenant") ID. The value might be ``null`` (JSON null data type). in: body required: true type: string protected: description: | A boolean value that must be ``false`` or the image cannot be deleted. in: body required: true type: boolean protected-in-request: description: | Image protection for deletion. Valid value is ``true`` or ``false``. Default is ``false``. in: body required: false type: boolean schema-image: description: | The URL for the schema describing a virtual machine image. in: body required: true type: string schema-images: description: | The URL for the schema describing a list of images. in: body required: true type: string schema-member: description: | The URL for the schema describing an image member. in: body required: true type: string schema-members: description: | The URL for the schema describing an image member list. in: body required: true type: string self: description: | The URL for the virtual machine image. in: body required: true type: string size: description: | The size of the image data, in bytes. The value might be ``null`` (JSON null data type). in: body required: true type: integer status: description: | The image status. in: body required: true type: string stores-in-request: description: | If present, contains the list of store ids to which the image binary data will be imported. in: body required: false type: array tags: description: | List of tags for this image, possibly an empty list. in: body required: true type: array tags-in-request: description: | List of tags for this image. Each tag is a string of at most 255 chars. The maximum number of tags allowed on an image is set by the operator. in: body required: false type: array tasks: description: | A list of *task* objects, associated with the given image. in: body required: true type: array updated_at: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``updated_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string url: description: | The URL to access the image file kept in external store. in: body required: true type: string validation-data: description: | Image metadata in key:value pairs, containing values of ``os_hash_value`` and ``os_hash_algo``, to be added to the image. If ``do_secure_hash`` is not passed, then it is the responsibility of the consumer of the location add API to provide the correct values in ``validation_data``. in: body required: false type: object value: description: | Value of image property used in add or replace operations expressed in JSON notation. For example, you must enclose strings in quotation marks, and you do not enclose numeric values in quotation marks. in: body required: true type: string virtual_size: description: | The virtual size of the image. The value might be ``null`` (JSON null data type). in: body required: true type: integer visibility: description: | Image visibility, that is, the access permission for the image. in: body required: true type: string visibility-in-request: description: | Visibility for this image. Valid value is one of: ``public``, ``private``, ``shared``, or ``community``.
At most sites, only an administrator can make an image ``public``. Some sites may restrict which users can make an image ``community``. Some sites may restrict which users can perform member operations on a ``shared`` image. *Since the Image API v2.5, the default value is ``shared``.* in: body required: false type: string ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-schemas.inc0000664000175000017500000000520000000000000021606 0ustar00zuulzuul00000000000000.. -*- rst -*- .. note: You can get a 400 on a GET if you pass a request body (see router.py) Image Schemas ************* Gets a JSON-schema document that represents the various entities used by the Images v2 API. .. _images-schema: Show images schema ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/images *(Since Images v2.0)* Shows a JSON schema document that represents an *images* entity. An images entity is a container of image entities. The following schema is solely an example. Consider only the response to the API call as authoritative. Normal response codes: 200 Error response codes: 400, 401 Request ------- This operation has no request parameters and does not accept a request body. Response Example ---------------- .. literalinclude:: samples/schemas-images-list-response.json :language: json .. _image-schema: Show image schema ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/image *(Since Images v2.0)* Shows a JSON schema document that represents an *image* entity. The following schema is solely an example. Consider only the response to the API call as authoritative. Normal response codes: 200 Error response codes: 400, 401 Request ------- This operation has no request parameters and does not accept a request body. Response Example ---------------- .. literalinclude:: samples/schemas-image-show-response.json :language: json .. _image-members-schema: Show image members schema ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/members *(Since Images v2.1)* Shows a JSON schema document that represents an *image members* entity. An image members entity is a container of image member entities. The following schema is solely an example. Consider only the response to the API call as authoritative. Normal response codes: 200 Error response codes: 400, 401 Request ------- This operation has no request parameters and does not accept a request body. Response Example ---------------- .. literalinclude:: samples/schemas-image-members-list-response.json :language: json .. _image-member-schema: Show image member schema ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/member *(Since Images v2.1)* Shows a JSON schema document that represents an *image member* entity. The following schema is solely an example. Consider only the response to the API call as authoritative. Normal response codes: 200 Error response codes: 400, 401 Request ------- This operation has no request parameters and does not accept a request body. Response Example ---------------- .. literalinclude:: samples/schemas-image-member-show-response.json :language: json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-sharing-v2.inc0000664000175000017500000002217200000000000022152 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _image-sharing: Sharing ******* Images may be shared among projects by creating *members* on the image. Image members have read-only privileges on the image. The following calls allow you to create, list, update, and delete image members. .. note:: An image member is an identifier for a consumer with whom the image is shared. In OpenStack clouds, where the value of the ``owner`` property of an image is a project ID, the appropriate identifier to use for the ``member_id`` is the consumer's project ID (which used to be called the "tenant ID"). * Image sharing is project-to-project. Thus *all the individual users in the consuming project have access to the image*. You cannot share an image with only one specific user in the target project. When an image is shared, the member is given immediate access to the image. To prevent spamming other users' image lists, a shared image does not appear in a member's image list until the member "accepts" the image. Only the image owner may create members. Only an image member may modify their member status. .. TODO(rosmaita): update the following reference when the "narrative" API docs have a final resting place For a conceptual overview of image sharing, including a suggested workflow, please consult `Image API v2 Sharing`_. .. _Image API v2 Sharing: http://specs.openstack.org/openstack/glance-specs/specs/api/v2/sharing-image-api-v2.html .. note:: If you don't want to maintain a sharing relationship with particular image consumers, but instead want to make an image available to *all* users, you may update your image's ``visibility`` property to ``community``. * In some clouds, the ability to "communitize" an image may be prohibited or restricted to trusted users. Please consult your cloud's local documentation for details.
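As an illustration of the producer's side of this workflow, here is a minimal sketch in Python using the ``requests`` library. The endpoint, token, and IDs are placeholders (a real client would obtain a token from the Identity service), and error handling is omitted:

.. code-block:: python

    import requests

    # Placeholder values -- substitute your own cloud's endpoint and credentials.
    GLANCE = "http://glance.example.org"
    OWNER_HEADERS = {"X-Auth-Token": "<owner-token>"}

    # Producer: share an image (whose visibility is 'shared') with the
    # consumer's project by creating a member on it.
    resp = requests.post(
        GLANCE + "/v2/images/<image-id>/members",
        headers=OWNER_HEADERS,
        json={"member": "<consumer-project-id>"},
    )
    member = resp.json()
    print(member["status"])   # 'pending' until the consumer accepts the image

The consumer must then accept the image (see the Update image member call below) before it appears in the consumer's default image list.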
Create image member ~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/images/{image_id}/members Adds a tenant ID as an image member. *(Since Image API v2.1)* Preconditions - The image must exist. - The image must have a ``visibility`` value of ``shared``. - You must be the owner of the image. Synchronous Postconditions - With correct permissions, you can see the member status of the image member as ``pending`` through API calls. Troubleshooting - Even if you have correct permissions, if the ``visibility`` attribute is not set to ``shared``, the request returns the HTTP ``403`` response code. Ensure that you meet the preconditions and run the request again. If the request fails again, review your API request. - If the member is already a member of the image, the service returns the ``Conflict (409)`` response code. If you meant to specify a different member, run the request again. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409, 413 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - member: member_id Request Example --------------- .. literalinclude:: samples/image-member-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - created_at: created_at - image_id: image_id-in-body - member_id: member_id - schema: schema-member - status: member_status - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/image-member-create-response.json :language: json Show image member details ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/images/{image_id}/members/{member_id} Shows image member details. *(Since Image API v2.1)* Response body is a single image member entity. Preconditions - The image must exist. - The image must have a ``visibility`` value of ``shared``.
- You must be either the owner of the image or the image member referenced in the call. Normal response codes: 200 Error response codes: 400, 401, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - member_id: member_id-in-path Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - created_at: created_at - image_id: image_id-in-body - member_id: member_id - schema: schema-member - status: member_status - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/image-member-details-response.json :language: json List image members ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/images/{image_id}/members Lists the tenants that share this image. *(Since Image API v2.1)* If the image owner makes this call, the complete member list is returned. If a user who is an image member makes this call, the member list contains only information for that user. If a user who is not an image member makes this call, the call returns the HTTP ``404`` response code. Preconditions - The image must exist. - The image must have a ``visibility`` value of ``shared``. - You must be the owner or a member of the image. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - members: members - schema: schema-members Response Example ---------------- .. literalinclude:: samples/image-members-list-response.json :language: json Update image member ~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/images/{image_id}/members/{member_id} Sets the status for an image member. *(Since Image API v2.1)* This call allows an image member to change their *member status*. When an image is shared with you, you have immediate access to the image. Updating your member status on the image determines whether the image appears in your image list response. - When an image is shared with you, your ``member_status`` is ``pending``. You won't see the image unless you go looking for it, either by making a show image detail request using the image's ID, or by making an image list call specifically looking for a shared image in member status ``pending``. This way, other users cannot "spam" your image list with images you may not want to see. - If you want to see a particular shared image in your image list, then you must use this call to change your member status on the image to ``accepted``. (A sketch of this workflow appears below.) - The image owner can see what your member status is on an image, but the owner *cannot* change the status. Only you (or an administrator) can do that. - There are three member status values: ``pending``, ``accepted``, and ``rejected``. The ``pending`` and ``rejected`` statuses are functionally identical. The difference is that ``pending`` indicates to the owner that you haven't updated your member status, so perhaps you aren't aware that the image has been shared with you. The ``rejected`` status indicates that you are aware that the image exists and you specifically decided that you don't want to see it in your image list response. For a more detailed discussion of image sharing, please consult `Image API v2 Sharing`_. Preconditions - The image must exist. - The image must have a ``visibility`` value of ``shared``. - You must be the member of the image referenced in the call.
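For example, a consumer can locate images that have been shared with it but not yet accepted, and then accept one. The following is a minimal sketch using Python and the ``requests`` library; the endpoint, token, and project ID are placeholders, and error handling is omitted:

.. code-block:: python

    import requests

    # Placeholder values -- substitute your own cloud's endpoint and credentials.
    GLANCE = "http://glance.example.org"
    HEADERS = {"X-Auth-Token": "<member-token>"}

    # Find shared images on which our member status is still 'pending'.
    resp = requests.get(
        GLANCE + "/v2/images",
        headers=HEADERS,
        params={"visibility": "shared", "member_status": "pending"},
    )
    images = resp.json()["images"]

    # Accept the first one so that it appears in normal image-list responses.
    if images:
        resp = requests.put(
            GLANCE + "/v2/images/" + images[0]["id"] + "/members/<project-id>",
            headers=HEADERS,
            json={"status": "accepted"},
        )
        print(resp.json()["status"])   # 'accepted'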
Synchronous Postconditions - If you update the member status to ``accepted`` and have the correct permissions, you see the image in list images responses. - With correct permissions, you can make API calls to see the updated member status of the image. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - member_id: member_id-in-path - status: member_status Request Example --------------- .. literalinclude:: samples/image-member-update-request.json :language: json Response Parameters ------------------- .. rest_parameters:: images-parameters.yaml - created_at: created_at - image_id: image_id-in-body - member_id: member_id - schema: schema-member - status: member_status - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/image-member-update-response.json :language: json Delete image member ~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/images/{image_id}/members/{member_id} Deletes a tenant ID from the member list of an image. *(Since Image API v2.1)* Preconditions - The image must exist. - The image must have a ``visibility`` value of ``shared``. - You must be the owner of the image. Synchronous Postconditions - The API removes the member from the image's member list. Troubleshooting - Even if you have correct permissions, if you are not the owner of the image or you specify an incorrect image ID or member ID, the call returns the HTTP ``403`` or ``404`` response code. Ensure that you meet the preconditions and run the request again. If the request fails again, review your API request URI. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - member_id: member_id-in-path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/images-tags.inc0000664000175000017500000000144300000000000021126 0ustar00zuulzuul00000000000000.. -*- rst -*- Image tags ********** Adds and deletes image tags. Image tags may also be modified by the :ref:`v2-image-update` call. Add image tag ~~~~~~~~~~~~~ .. rest_method:: PUT /v2/images/{image_id}/tags/{tag} Adds a tag to an image. *(Since Image API v2.0)* Normal response codes: 204 Error response codes: 400, 401, 403, 404, 413 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - tag: tag-in-path Delete image tag ~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/images/{image_id}/tags/{tag} Deletes a tag from an image. *(Since Image API v2.0)* Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: images-parameters.yaml - image_id: image_id-in-path - tag: tag-in-path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/index.rst0000664000175000017500000000220300000000000020066 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. :tocdepth: 3 ============================== Image Service API v2 (CURRENT) ============================== .. rest_expand_all:: .. include:: images-parameters-descriptions.inc .. include:: images-images-v2.inc .. include:: images-sharing-v2.inc .. include:: images-tags.inc .. include:: images-schemas.inc .. include:: images-data.inc .. include:: images-import.inc .. include:: stores.inc .. include:: discovery.inc .. include:: tasks.inc .. include:: tasks-schemas.inc .. include:: cache-manage.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-index.rst0000664000175000017500000000533000000000000021660 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. :tocdepth: 3 ============================================= Metadata Definitions Service API v2 (CURRENT) ============================================= .. rest_expand_all:: Metadefs ******** General information ~~~~~~~~~~~~~~~~~~~ The Metadata Definitions Service ("metadefs", for short) provides a common API for vendors, operators, administrators, services, and users to meaningfully define available key:value pairs that can be used on different types of cloud resources (for example, images, artifacts, volumes, flavors, aggregates, and other resources). To get you started, Glance contains a default catalog of metadefs that may be installed at your site; see the `README `_ in the code repository for details. Once a common catalog of metadata definitions has been created, the catalog is available for querying through the API. Note that this service stores only the *catalog*, because metadefs are meta-metadata. Metadefs provide information *about* resource metadata, but do not themselves serve as actual metadata. Actual key:value pairs are stored on the resources to which they apply using the metadata facilities provided by the appropriate API. (For example, the Images API would be used to put specific key:value pairs on a virtual machine image.) A metadefs definition includes a property's key, its description, its constraints, and the resource types to which it can be associated. See `Metadata Definition Concepts `_ in the Glance Developer documentation for more information. .. note:: By default, only admins can manipulate the data exposed by this API, but all users may list and show public resources. This changed from a default of "open to all" in the Wallaby release. .. include:: metadefs-namespaces.inc .. include:: metadefs-resourcetypes.inc .. include:: metadefs-namespaces-objects.inc .. include:: metadefs-namespaces-properties.inc .. include:: metadefs-namespaces-tags.inc .. 
include:: metadefs-schemas.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-namespaces-objects.inc0000664000175000017500000001466100000000000024267 0ustar00zuulzuul00000000000000.. -*- rst -*- Metadata definition objects *************************** Creates, lists, shows details for, updates, and deletes metadata definition objects. *Since API v2.2* Create object ~~~~~~~~~~~~~ .. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/objects Creates an object definition in a namespace. Normal response codes: 201 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - name: object-name - description: object-description-in-request - properties: object-properties-in-request - required: object-required-in-request Request Example --------------- .. literalinclude:: samples/metadef-object-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - description: object-description - name: object-name - properties: object-properties - required: object-required - schema: object-schema - self: self - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/metadef-object-create-response.json :language: json List objects ~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/objects Lists object definitions in a namespace. Returns a subset of the larger collection of object definitions and a link that you can use to get the next set of object definitions. You should always check for the presence of a ``next`` link and use it as the URI in a subsequent HTTP GET request. You should follow this pattern until a ``next`` link is no longer provided. The next link preserves any query parameters that you send in your initial request. You can use the ``first`` link to jump back to the first page of the collection. If you prefer to paginate through object definitions manually, use the ``limit`` and ``marker`` parameters. Use the ``resource_types`` and ``visibility`` query parameters to filter the response. For example, set the ``resource_types`` query parameter to ``OS::Glance::Image,OS::Nova::Flavor`` to filter the response to include only object definitions from namespaces that are associated with the given resource types. You can sort the results of this operation by using the ``sort_key`` and ``sort_dir`` parameters. The API uses the natural sorting of whatever object attribute is provided as the ``sort_key``. Normal response codes: 200 Error response codes: 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - visibility: visibility-in-query - resource_types: resource_types-in-query - sort_key: sort_key - sort_dir: sort_dir Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - objects: objects Response Example ---------------- .. literalinclude:: samples/metadef-objects-list-response.json :language: json Show object ~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/objects/{object_name} Shows the definition for an object. The response body shows a single object entity. Normal response codes: 200 ..
yep, 400 if the request includes a body Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - object_name: object_name There is no request body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - description: object-description - name: object-name - properties: object-properties - required: object-required - schema: object-schema - self: self - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/metadef-object-details-response.json :language: json Update object ~~~~~~~~~~~~~ .. rest_method:: PUT /v2/metadefs/namespaces/{namespace_name}/objects/{object_name} Updates an object definition in a namespace. The object resource is completely replaced by what you specify in the request body. Thus, if you leave out any of the optional parameters, and they exist in the current object, they will be eliminated by this call. It is possible to change the name of the object with this call; if you do, note that the URL for the object (specified by the ``self`` field) will change. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - object_name: object_name - name: object-name - description: object-description-in-request - properties: object-properties-in-request - required: object-required-in-request Request Example --------------- .. literalinclude:: samples/metadef-object-update-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - description: object-description - name: object-name - properties: object-properties - required: object-required - schema: object-schema - self: self - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/metadef-object-update-response.json :language: json Delete object ~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name}/objects/{object_name} Deletes an object definition from a namespace. .. note:: If the namespace containing the object is protected, that is, if the ``protected`` attribute of the namespace is ``true``, then you must first set the ``protected`` attribute to ``false`` on the namespace before you will be permitted to delete the object. * If you try to delete an object from a protected namespace, the call returns the ``403`` response code. * To change the ``protected`` attribute of a namespace, use the :ref:`Update namespace ` call. When you successfully delete an object from a namespace, the response is empty and the response code is ``204``. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - object_name: object_name There is no request body. There is no response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-namespaces-properties.inc0000664000175000017500000001434600000000000025032 0ustar00zuulzuul00000000000000.. -*- rst -*- Metadata definition properties ****************************** Creates, lists, shows details for, updates, and deletes metadata definition properties. *Since API v2.2* Create property ~~~~~~~~~~~~~~~ .. 
rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/properties Creates a property definition in a namespace. The schema is a subset of the JSON property definition schema. Normal response codes: 201 Error response codes: 400, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - name: name - title: title - type: type - additionalItems: additionalItems - description: property-description-in-request - default: default - items: items - operators: operators - enum: enum - maximum: maximum - minItems: minItems - readonly: readonly - minimum: minimum - maxItems: maxItems - maxLength: maxLength - uniqueItems: uniqueItems - pattern: pattern - minLength: minLength Request Example --------------- .. literalinclude:: samples/metadef-property-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - additionalItems: additionalItems - description: property-description - title: title - default: default - items: items - operators: operators - enum: enum - maximum: maximum - minItems: minItems - readonly: readonly - minimum: minimum - maxItems: maxItems - maxLength: maxLength - uniqueItems: uniqueItems - pattern: pattern - type: type - minLength: minLength - name: name Response Example ---------------- .. literalinclude:: samples/metadef-property-create-response.json :language: json List properties ~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/properties Lists property definitions in a namespace. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name There is no request body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - properties: properties-dict Response Example ---------------- .. literalinclude:: samples/metadef-properties-list-response.json :language: json Show property definition ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/properties/{property_name} Shows the definition for a property. If you use the ``resource_type`` query parameter, the API removes the prefix of the resource type from the property name before it submits the query. This enables you to look for a property name that starts with a prefix from an associated resource type. The response body shows a single property entity. Normal response codes: 200 Error response codes: 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - property_name: property_name - namespace_name: namespace_name - resource_type: resource_type-in-query Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - additionalItems: additionalItems - description: property-description - title: title - default: default - items: items - operators: operators - enum: enum - maximum: maximum - minItems: minItems - readonly: readonly - minimum: minimum - maxItems: maxItems - maxLength: maxLength - uniqueItems: uniqueItems - pattern: pattern - type: type - minLength: minLength - name: name Response Example ---------------- .. literalinclude:: samples/metadef-property-details-response.json :language: json Update property definition ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/metadefs/namespaces/{namespace_name}/properties/{property_name} Updates a property definition. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409 Request ------- .. 
rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - property_name: property_name - name: name-property - title: title - type: type - additionalItems: additionalItems - description: description - default: default - items: items - operators: operators - enum: enum - maximum: maximum - minItems: minItems - readonly: readonly - minimum: minimum - maxItems: maxItems - maxLength: maxLength - uniqueItems: uniqueItems - pattern: pattern - minLength: minLength Request Example --------------- .. literalinclude:: samples/metadef-property-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - additionalItems: additionalItems - description: description - title: title - default: default - items: items - operators: operators - enum: enum - maximum: maximum - minItems: minItems - readonly: readonly - minimum: minimum - maxItems: maxItems - maxLength: maxLength - uniqueItems: uniqueItems - pattern: pattern - type: type - minLength: minLength - name: name-property Response Example ---------------- .. literalinclude:: samples/metadef-property-update-response.json :language: json Remove property definition ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name}/properties/{property_name} Removes a property definition from a namespace. .. note:: If the namespace containing the property is protected, that is, if the ``protected`` attribute of the namespace is ``true``, then you must first set the ``protected`` attribute to ``false`` on the namespace before you will be permitted to delete the property. * If you try to delete a property from a protected namespace, the call returns the ``403`` response code. * To change the ``protected`` attribute of a namespace, use the :ref:`Update namespace ` call. When you successfully delete a property from a namespace, the response is empty and the response code is ``204``. Normal response codes: 204 Error response codes: 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - property_name: property_name - namespace_name: namespace_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-namespaces-tags.inc0000664000175000017500000001337400000000000023574 0ustar00zuulzuul00000000000000.. -*- rst -*- Metadata definition tags ************************ Creates, lists, shows details for, updates, and deletes metadata definition tags. *Since API v2.2* Create tag definition ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} Adds a tag to the list of namespace tag definitions. Normal response codes: 201 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - tag_name: tag_name There is no request body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - name: name-tag - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/metadef-tag-create-response.json :language: json Get tag definition ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} Gets a definition for a tag. The response body shows a single tag entity. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. 
rest_parameters:: metadefs-parameters.yaml - tag_name: tag_name - namespace_name: namespace_name There is no request body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - name: name-tag - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/metadef-tag-details-response.json :language: json Update tag definition ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} Renames a tag definition. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - tag_name: tag_name - namespace_name: namespace_name - name: name-tag Request Example --------------- .. literalinclude:: samples/metadef-tag-update-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - name: name-tag - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/metadef-tag-update-response.json :language: json Delete tag definition ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name}/tags/{tag_name} Deletes a tag definition within a namespace. .. note:: If the namespace containing the tag is protected, that is, if the ``protected`` attribute of the namespace is ``true``, then you must first set the ``protected`` attribute to ``false`` on the namespace before you will be permitted to delete the tag. * If you try to delete a tag from a protected namespace, the call returns the ``403`` response code. * To change the ``protected`` attribute of a namespace, use the :ref:`Update namespace ` call. When you successfully delete a tag from a namespace, the response is empty and the response code is ``204``. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - tag_name: tag_name Create tags ~~~~~~~~~~~ .. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/tags Creates one or more tag definitions in a namespace. Normal response codes: 201 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - X-Openstack-Append: append - namespace_name: namespace_name - tags: tags Request Example --------------- .. literalinclude:: samples/metadef-tags-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - name: name - tags: tags Response Example ---------------- .. literalinclude:: samples/metadef-tags-create-response.json :language: json List tags ~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/tags Lists the tag definitions within a namespace. To manually paginate through the list of tags, use the ``limit`` and ``marker`` parameters. To sort the results of this operation use the ``sort_key`` and ``sort_dir`` parameters. The API uses the natural sort order of the tag attribute of the ``sort_key`` parameter. Normal response codes: 200 Error response codes: 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - limit: limit-tags - marker: marker-tags - sort_key: sort_key-tags - sort_dir: sort_dir There is no request body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - tags: tags Response Example ---------------- .. 
literalinclude:: samples/metadef-tags-list-response.json :language: json Delete all tag definitions ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name}/tags Deletes all tag definitions within a namespace. .. note:: If the namespace containing the tags is protected, that is, if the ``protected`` attribute of the namespace is ``true``, then you must first set the ``protected`` attribute to ``false`` on the namespace before you will be permitted to delete the tags. If you try to delete the tags from a protected namespace, the call returns the ``403`` response code. When you successfully delete the tags from a namespace, the response is empty and the response code is ``204``. Normal response codes: 204 Error response codes: 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name There is no request body. There is no response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-namespaces.inc0000664000175000017500000002110400000000000022626 0ustar00zuulzuul00000000000000.. -*- rst -*- Metadata definition namespaces ****************************** Creates, lists, shows details for, updates, and deletes metadata definition namespaces. Defines namespaces that can contain property definitions, object definitions, and resource type associations. *Since API v2.2* Create namespace ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/metadefs/namespaces Creates a namespace. A namespace must be unique across all users. Attempting to create an already existing namespace will result in a 409 (Conflict) response. The ``Location`` response header contains the newly-created URI for the namespace. Normal response codes: 201 Error response codes: 400, 401, 403, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace: namespace - display_name: display_name - description: description - visibility: visibility-in-request - protected: protected-in-request The request body may also contain properties, objects, and resource type associations, or these can be added later by the :ref:`v2-update-namespace` call. Request Example --------------- .. literalinclude:: samples/metadef-namespace-create-request-simple.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - Location: Location - created_at: created_at - description: description - display_name: display_name - namespace: namespace - owner: owner - protected: protected - schema: schema-namespace - self: self - updated_at: updated_at - visibility: visibility If the request body contained properties, objects, or resource type associations, these will be included in the response. Response Example ---------------- .. code-block:: console HTTP/1.1 201 Created Content-Length: 427 Content-Type: application/json; charset=UTF-8 Location: http://glance.openstack.example.org/v2/metadefs/namespaces/FredCo::SomeCategory::Example X-Openstack-Request-Id: req-6d4a8ad2-c018-4bfc-8fe5-1a36c23c43eb Date: Thu, 19 May 2016 16:05:48 GMT .. literalinclude:: samples/metadef-namespace-create-response-simple.json :language: json List namespaces ~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces Lists available namespaces. Returns a list of namespaces to which the authenticated user has access. 
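The pagination pattern described below lends itself to a simple loop. Here is a minimal sketch, using Python and the ``requests`` library, that collects every namespace by following ``next`` links; the endpoint and token are placeholders, and error handling is omitted:

.. code-block:: python

    import requests

    # Placeholder values -- substitute your own cloud's endpoint and credentials.
    GLANCE = "http://glance.example.org"
    HEADERS = {"X-Auth-Token": "<token>"}

    namespaces = []
    path = "/v2/metadefs/namespaces?limit=50"   # first page
    while path:
        body = requests.get(GLANCE + path, headers=HEADERS).json()
        namespaces.extend(body["namespaces"])
        path = body.get("next")                 # absent on the last page
    print(len(namespaces))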
If the list is too large to fit in a single response, either because of operator configuration or because you've included a ``limit`` query parameter in the request to restrict the response size, the response will contain a link that you can use to get the next page of namespaces. Check for the presence of a ``next`` link and use it as the URI in a subsequent HTTP GET request. Follow this pattern until a ``next`` link is no longer provided. The ``next`` link preserves any query parameters that you send in your initial request. You can use the ``first`` link to return to the first page in the collection. If you prefer to paginate through namespaces manually, use the ``limit`` and ``marker`` parameters. The list operation accepts the ``resource_types`` and ``visibility`` query parameters, which you can use to filter the response. To sort the results of this operation, use the ``sort_key`` and ``sort_dir`` parameters. The API uses the natural sorting order in the namespace attribute that you provide as the ``sort_key`` parameter. Normal response codes: 200 Error response codes: 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - limit: limit - marker: marker - visibility: visibility-in-query - resource_types: resource_types-in-query - sort_key: sort_key - sort_dir: sort_dir Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - first: first - namespaces: namespaces - next: next - schema: schema-namespaces Response Example ---------------- .. literalinclude:: samples/metadef-namespaces-list-response.json :language: json Get namespace details ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name} Gets details for a namespace. The response body shows a single namespace entity with all details including properties, objects, and resource type associations. If the namespace contains a resource type association that specifies a prefix, you may optionally include the name of the resource type as a query parameter. In that case, the prefix will be applied to all property names in the response. (See below for an example.) Normal response codes: 200 .. returns 400 if a request body is sent Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - resource_type: resource_type-in-query-namespace-detail The request does not take a body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - description: description - display_name: display_name - namespace: namespace - objects: objects - owner: owner - properties: properties-dict - protected: protected - resource_type_associations: resource_type_associations - schema: schema-namespace - self: self - visibility: visibility Response Example ---------------- .. literalinclude:: samples/metadef-namespace-details-response.json :language: json Response Example (with resource_type query parameter) ----------------------------------------------------- This is the result of the following request: ``GET /v2/metadefs/namespaces/OS::Compute::Libvirt?resource_type=OS::Glance::Image`` Note that the name of each property has had the appropriate prefix applied to it. .. literalinclude:: samples/metadef-namespace-details-with-rt-response.json :language: json .. _v2-update-namespace: Update namespace ~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/metadefs/namespaces/{namespace_name} Updates a namespace. .. 
note:: Be careful using this call, especially when all you want to do is change the ``protected`` value so that you can delete some objects, properties, or resource type associations in the namespace. While only the ``namespace`` is required in the request body, if this call is made with *only* the ``namespace`` in request body, the other attributes listed below will be set to their default values -- which in the case of ``description`` and ``display_name``, is null. So if you want to change *only* the ``protected`` value with this call, be sure to also include the current values of the following parameters in the request body: - ``description`` - ``display_name`` - ``namespace`` - ``visibility`` The objects, properties, and resource type associations in a namespace are unaffected by this call. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - description: description - display_name: display_name - namespace: namespace - protected: protected-in-request - visibility: visibility-in-request Request Example --------------- .. literalinclude:: samples/metadef-namespace-update-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - description: description - display_name: display_name - namespace: namespace - owner: owner - protected: protected - schema: schema-namespace - self: self - updated_at: updated_at - visibility: visibility Response Example ---------------- .. literalinclude:: samples/metadef-namespace-update-response.json :language: json Delete namespace ~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name} Deletes a namespace and its properties, objects, and any resource type associations. .. note:: If the namespace is protected, that is, if the ``protected`` attribute of the namespace is ``true``, then you must first set the ``protected`` attribute to ``false`` on the namespace before you will be permitted to delete it. * If you try to delete a protected namespace, the call returns the ``403`` response code. * To change the ``protected`` attribute of a namespace, use the :ref:`Update namespace ` call. A successful operation returns the HTTP ``204`` (No Content) response code. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name The request does not take a body. The request does not return a body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-parameters.yaml0000664000175000017500000003261700000000000023056 0ustar00zuulzuul00000000000000# variables in header append: description: | If present and set to True, new metadefs tags are appended to the existing ones. Otherwise, existing tags are overwritten. in: header required: false type: string Content-Type-json: description: | The media type descriptor for the request body. Use ``application/json``. in: header required: true type: string Location: description: | The newly-created URI for the namespace. in: header required: true type: string # variables in path name: description: | Name of the resource type. A Name is limited to 80 chars in length. in: path required: true type: string namespace_name: description: | The name of the namespace whose details you want to see. 
(The name is the value of a namespace's ``namespace`` field.) in: path required: true type: string object_name: description: | The name of the object. in: path required: true type: string property_name: description: | The name of the property. in: path required: true type: string resource_type_name: description: | The name of the resource type. in: path required: true type: string tag_name: description: | The name of the tag. A Name is limited to 80 chars in length. in: path required: true type: string # variables in query limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer limit-tags: description: | Requests a page size of tags. Returns a number of tags up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the name of the last-seen tag from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer marker: description: | Allows specification of a *namespace identifier*. When present, only namespaces occurring after that namespace will be listed, that is, those namespaces having a ``sort_key`` later than that of the marker in the ``sort_dir`` direction. in: query required: false type: string marker-tags: description: | Allows specification of a tag name. When present, only tags occurring *after* the named tag will be listed, that is, those tags having a ``sort_key`` later than that of the marker in the ``sort_dir`` direction. in: query required: false type: string resource_type-in-query: description: | Filters the response by property names that start with a prefix from an associated resource type. The API removes the prefix of the resource type from the property name in the response. in: query required: false type: string resource_type-in-query-namespace-detail: description: | Apply the prefix for the specified resource type to the names of the properties listed in the response. If the resource type specified does not have an association with this namespace, or if the resource type is associated but does not have a prefix defined in this namespace, this parameter is ignored. in: query required: false type: string resource_types-in-query: description: | Filters the response to include only those namespaces that contain the specified resource type or types as resource type associations. Use the comma (``,``) character to separate multiple values. For example, ``OS::Glance::Image,OS::Nova::Flavor`` shows only namespaces associated with these resource types. in: query required: false type: string sort_dir: description: | Sorts the response. Use ``asc`` for ascending or ``desc`` for descending order. The default is ``desc``. in: query required: false type: string sort_key: description: | Sorts the response by an attribute. Accepted values are ``namespace``, ``created_at``, and ``updated_at``. Default is ``created_at``. in: query required: false type: string sort_key-tags: description: | Sorts the response by an attribute. Accepted values are ``name``, ``created_at``, and ``updated_at``. Default is ``created_at``. in: query required: false type: string visibility-in-query: description: | Filters the response by a namespace visibility value. A valid value is ``public`` or ``private``.
If you omit this parameter, the response shows both ``public`` and ``private`` namespaces. in: query required: false type: string # variables in body additionalItems: description: | Describes extra items, if you use tuple typing. If the value of ``items`` is an array (tuple typing) and the instance is longer than the list of schemas in ``items``, the additional items are described by the schema in this property. If this value is ``false``, the instance cannot be longer than the list of schemas in ``items``. If this value is ``true``, that is equivalent to the empty schema (anything goes). in: body required: false type: string created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_. in: body required: true type: string default: description: | Default property description. in: body required: false type: string description: description: | The description of the namespace. in: body required: false type: string display_name: description: | User-friendly name to use in a UI to display the namespace name. in: body required: false type: string enum: description: | Enumerated list of property values. in: body required: true type: array enum-in-request: description: | Enumerated list of property values. in: body required: false type: array first: description: | The URI for the first page of the response. in: body required: true type: string hypervisor_type: description: | Hypervisor type of property values. in: body required: true type: object items: description: | Schema for the items in an array. in: body required: false type: string maximum: description: | Maximum allowed numerical value. in: body required: false type: string maxItems: description: | Maximum length of an array. in: body required: false type: string maxLength: description: | Maximum allowed string length. in: body required: false type: string minimum: description: | Minimum allowed numerical value. in: body required: false type: string minItems: description: | Minimum length of an array. in: body required: false type: string minLength: description: | Minimum allowed string length. in: body required: false type: string name-property: description: | The name of the property. A Name is limited to 80 chars in length. in: body required: true type: string name-resource-type: description: | Name of the resource type. in: body required: true type: string name-tag: description: | The name of the tag. A Name is limited to 80 chars in length. in: body required: true type: string namespace: description: | An identifier (a name) for the namespace. The value must be unique across all users. in: body required: true type: string namespaces: description: | A list of ``namespace`` objects. in: body required: true type: array next: description: | The URI for the next page of the response. Will not be present on the last page of the response. in: body required: true type: string object-description: description: | Detailed description of the object. in: body required: true type: string object-description-in-request: description: | Detailed description of the object. in: body required: false type: string object-name: description: | The name of the object, suitable for use as an identifier. A Name is limited to 80 chars in length. in: body required: true type: string object-properties: description: | A set of key:value pairs, where each value is a *property* entity.
in: body required: true type: object object-properties-in-request: description: | A set of key:value pairs, where each value is a *property* entity. in: body required: false type: object object-required: description: | A list of the names of properties that are required on this object. in: body required: true type: array object-required-in-request: description: | A list of the names of properties that are required on this object. in: body required: false type: array object-schema: description: | The URI of the JSON schema describing an *object*. in: body required: true type: string objects: description: | One or more object definitions of the namespace. in: body required: true type: string objects-namespace: description: | Namespace object definitions, if any. in: body required: false type: object operators: description: | Operators property description. in: body required: false type: string owner: description: | An identifier for the owner of this resource, usually the tenant ID. in: body required: true type: string pattern: description: | A regular expression ( `ECMA 262 `_ ) that a string value must match. in: body required: false type: string prefix: description: | Prefix for any properties in the namespace that you want to apply to the resource type. If you specify a prefix, you must append a prefix separator, such as the colon (``:``) character. in: body required: false type: string properties-dict: description: | A dictionary of key:value pairs, where each value is a *property* object as defined by the :ref:`Metadefs Property Schema `. in: body required: true type: object properties-nonempty: description: | One or more property definitions for the namespace. in: body required: true type: object properties-nullable: description: | Namespace property definitions, if any. in: body required: false type: object properties_target: description: | Some resource types allow more than one key and value pair for each instance. For example, the Block Storage service allows both user and image metadata on volumes. The ``properties_target`` parameter enables a namespace target to remove the ambiguity. in: body required: false type: string property-description: description: | Detailed description of the property. in: body required: true type: string property-description-in-request: description: | Detailed description of the property. in: body required: false type: string protected: description: | Namespace protection for deletion, either ``true`` or ``false``. in: body required: true type: boolean protected-in-request: description: | Namespace protection for deletion. A valid value is ``true`` or ``false``. Default is ``false``. in: body required: false type: boolean readonly: description: | Indicates whether this is a read-only property. in: body required: false type: boolean resource_type_associations: description: | A list, each element of which is described by the :ref:`Metadefs Resource Type Association Schema `. in: body required: true type: array resource_types-list: description: | A list of abbreviated *resource type* JSON objects, where each object contains the ``name`` of the resource type and its ``created_at`` and ``updated_at`` timestamps in `ISO 8601 Format `_. in: body required: true type: array schema-namespace: description: | The URI of the JSON schema describing a *namespace*. in: body required: true type: string schema-namespaces: description: | The URI of the JSON schema describing a *namespaces* entity, that is, an entity consisting of a list of abbreviated namespace objects.
in: body required: true type: string self: description: | The URI for this resource. in: body required: true type: string tag-name: description: | The name of the tag. in: body required: true type: string tags: description: | A list of *tag* objects, where each object is defined by the :ref:`Metadefs Tag Schema `. in: body required: true type: array title: description: | The title of the property. in: body required: true type: string type: description: | The property type. in: body required: true type: string uniqueItems: description: | Indicates whether all values in the array must be distinct. in: body required: false type: string updated_at: description: | The date and time when the resource was last updated. The date and time stamp format is `ISO 8601 `_. in: body required: true type: string visibility: description: | The namespace visibility, either ``public`` or ``private``. in: body required: true type: enum visibility-in-request: description: | The namespace visibility. A valid value is ``public`` or ``private``. Default is ``private``. in: body required: false type: enum ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-resourcetypes.inc0000664000175000017500000000722100000000000023427 0ustar00zuulzuul00000000000000.. -*- rst -*- Metadata definition resource types ********************************** Lists resource types. Also, creates, lists, and removes resource type associations in a namespace. *Since API v2.2* List resource types ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/resource_types Lists all available resource types. Using the other API calls in this section, you can create and maintain *resource type associations* between metadata definition namespaces and the resource types that are returned by this call. Normal response codes: 200 Error response codes: 400, 401, 404 Request ------- There are no request parameters. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - resource_types: resource_types-list Response Example ---------------- .. literalinclude:: samples/metadef-resource-types-list-response.json :language: json Create resource type association ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/metadefs/namespaces/{namespace_name}/resource_types Creates a resource type association between a namespace and the resource type specified in the body of the request. .. note:: If the resource type name specified does not name an existing resource type, a new resource type will be created as a side effect of this operation. Normal response codes: 201 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - name: name - prefix: prefix - properties_target: properties_target Request Example --------------- .. literalinclude:: samples/metadef-resource-type-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - created_at: created_at - prefix: prefix - properties_target: properties_target - name: name - updated_at: updated_at List resource type associations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/metadefs/namespaces/{namespace_name}/resource_types Lists resource type associations in a namespace. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. 
rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name There is no request body. Response Parameters ------------------- .. rest_parameters:: metadefs-parameters.yaml - resource_type_associations: resource_type_associations Response Example ---------------- .. literalinclude:: samples/metadef-resource-type-assoc-list-response.json :language: json Remove resource type association ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/metadefs/namespaces/{namespace_name}/resource_types/{name} Removes a resource type association in a namespace. .. note:: If the namespace containing the association is protected, that is, if the ``protected`` attribute of the namespace is ``true``, then you must first set the ``protected`` attribute to ``false`` on the namespace before you will be permitted to remove the resource type association. * If you try to delete a resource type association from a protected namespace, the call returns the ``403`` response code. * To change the ``protected`` attribute of a namespace, use the :ref:`Update namespace ` call. When you successfully delete a resource type association from a namespace, the response is empty and the response code is ``204``. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: metadefs-parameters.yaml - namespace_name: namespace_name - name: resource_type_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/metadefs-schemas.inc0000664000175000017500000001541000000000000022135 0ustar00zuulzuul00000000000000.. -*- rst -*- Metadata definition schemas *************************** Gets a JSON-schema document that represents a metadata definition entity. *(Since API v2.2)* Show metadata definition namespace schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/namespace Shows a JSON schema document that represents a metadata definition *namespace* entity. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-namespace-show-response.json :language: json Show metadata definition namespaces schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/namespaces Shows a JSON schema document that represents a metadata definition *namespaces* entity. A namespaces entity is a container for *namespace* entities. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-namespaces-list-response.json :language: json .. _md-schema-rt-assoc: Show metadata definition namespace resource type association schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/resource_type Shows a JSON schema document that represents a metadata definition namespace *resource type association* entity. The following schema document is an example. The authoritative response is the actual response to the API call. 
Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-resource-type-association-show-response.json :language: json Show metadata definition namespace resource type associations schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/resource_types Shows a JSON schema document that represents a metadata definition namespace *resource type associations* entity. A resource type associations entity is a container for *resource type association* entities. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-resource-type-associations-list-response.json :language: json Show metadata definition object schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/object Shows a JSON schema document that represents a metadata definition *object* entity. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-object-show-response.json :language: json Show metadata definition objects schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/objects Shows a JSON schema document that represents a metadata definition *objects* entity. An objects entity is a container for *object* entities. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-objects-list-response.json :language: json .. _md-schema-property: Show metadata definition property schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/property Shows a JSON schema document that represents a metadata definition *property* entity. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-property-show-response.json :language: json Show metadata definition properties schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/properties Shows a JSON schema document that represents a metadata definition *properties* entity. A properties entity is a container for *property* entities. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. 
literalinclude:: samples/schemas-metadef-properties-list-response.json :language: json .. _md-schema-tag: Show metadata definition tag schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/tag Shows a JSON schema document that represents a metadata definition *tag* entity. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-tag-show-response.json :language: json Show metadata definition tags schema ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/metadefs/tags Shows a JSON schema document that represents a metadata definition *tags* entity. A tags entity is a container for *tag* entities. The following schema document is an example. The authoritative response is the actual response to the API call. Normal response codes: 200 Error response codes: 400, 401 Request ------- There are no request parameters. The call does not take a request body. Response Example ---------------- .. literalinclude:: samples/schemas-metadef-tags-list-response.json :language: json ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.806297 glance-29.0.0/api-ref/source/v2/samples/0000775000175000017500000000000000000000000017674 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/add-location-request.json0000664000175000017500000000045100000000000024613 0ustar00zuulzuul00000000000000{ "url": "cinder://lvmdriver-1/39e6ffab-7502-4199-9609-416601615ca3", "validation_data": { "os_hash_algo": "sha512", "os_hash_value": "c5041ae163cf0f65600acfe7f6a63f212101687d41a57a4e18ffd2a07a452cd8175b8f5a4868dd2330bfe5ae123f18216bdbc9e0f80d131e64b94913a7b40bb5" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/cache-list-response.json0000664000175000017500000000060600000000000024441 0ustar00zuulzuul00000000000000{ "cached_images": [ { "image_id": "fe05d6c9-ef02-4161-9056-81ed046f3024", "hits": 0, "last_accessed": 1651504844.0860524, "last_modified": 1651504844.0860524, "size": 987654 } ], "queued_images": [ "e34e6e2f-fe16-420d-ad36-cebf69506106", "6b9fbf2b-3031-429a-80b1-b509e4c44046" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-create-request.json0000664000175000017500000000020500000000000024575 0ustar00zuulzuul00000000000000{ "container_format": "bare", "disk_format": "raw", "name": "Ubuntu", "id": "b2173dd3-7ad6-4362-baa6-a68bce3565cb" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-create-response.json0000664000175000017500000000132600000000000024750 0ustar00zuulzuul00000000000000{ "status": "queued", "name": "Ubuntu", "tags": [], "container_format": "bare", "created_at": "2015-11-29T22:21:42Z", "size": null, "disk_format": "raw", "updated_at": "2015-11-29T22:21:42Z", "visibility": "private", "locations": [], "self": "/v2/images/b2173dd3-7ad6-4362-baa6-a68bce3565cb", "min_disk": 0, "protected": false, "id": 
"b2173dd3-7ad6-4362-baa6-a68bce3565cb", "file": "/v2/images/b2173dd3-7ad6-4362-baa6-a68bce3565cb/file", "checksum": null, "os_hash_algo": null, "os_hash_value": null, "os_hidden": false, "owner": "bab7d5c60cd041a0a36f7c4b6e1dd978", "virtual_size": null, "min_ram": 0, "schema": "/v2/schemas/image" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-details-deactivate-response.json0000664000175000017500000000157500000000000027247 0ustar00zuulzuul00000000000000{ "status": "deactivated", "name": "cirros-0.3.2-x86_64-disk", "tags": [], "container_format": "bare", "created_at": "2014-05-05T17:15:10Z", "disk_format": "qcow2", "updated_at": "2014-05-05T17:15:11Z", "visibility": "public", "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", "min_disk": 0, "protected": false, "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", "checksum": "64d7c1cd2b6f60c92c14662941cb7913", "os_hash_algo": "sha512", "os_hash_value": "073b4523583784fbe01daff81eba092a262ec37ba6d04dd3f52e4cd5c93eb8258af44881345ecda0e49f3d8cc6d2df6b050ff3e72681d723234aff9d17d0cf09", "os_hidden": false, "owner": "5ef70662f8b34079a6eddb8da9d75fe8", "size": 13167616, "min_ram": 0, "schema": "/v2/schemas/image", "virtual_size": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-import-c-i-request.json0000664000175000017500000000025600000000000025320 0ustar00zuulzuul00000000000000{ "method": { "name": "copy-image" }, "stores": ["common", "cheap", "fast", "reliable"], "all_stores_must_succeed": false, "all_stores": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-import-g-d-request.json0000664000175000017500000000022700000000000025315 0ustar00zuulzuul00000000000000{ "method": { "name": "glance-direct" }, "stores": ["common", "cheap", "fast", "reliable"], "all_stores_must_succeed": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-import-gd-request.json0000664000175000017500000000027500000000000025243 0ustar00zuulzuul00000000000000{ "method": { "name": "glance-download", "glance_image_id": "c4705b36-b281-40f6-a01d-bf98883ead8e", "glance_region": "REGION2", "glance_service_interface": "public" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-import-w-d-request.json0000664000175000017500000000031600000000000025334 0ustar00zuulzuul00000000000000{ "method": { "name": "web-download", "uri": "https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-ppc64le-disk.img" }, "all_stores": true, "all_stores_must_succeed": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-info-import-response.json0000664000175000017500000000030400000000000025743 0ustar00zuulzuul00000000000000{ "import-methods": { "description": "Import methods available.", "type": "array", "value": [ "glance-direct", "web-download" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
glance-29.0.0/api-ref/source/v2/samples/image-member-create-request.json0000664000175000017500000000006500000000000026046 0ustar00zuulzuul00000000000000{ "member": "8989447062e04a818baf9e073fd04fa7" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-member-create-response.json0000664000175000017500000000040100000000000026206 0ustar00zuulzuul00000000000000{ "created_at": "2013-09-20T19:22:19Z", "image_id": "a96be11e-8536-4910-92cb-de50aa19dfe6", "member_id": "8989447062e04a818baf9e073fd04fa7", "schema": "/v2/schemas/member", "status": "pending", "updated_at": "2013-09-20T19:25:31Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-member-details-response.json0000664000175000017500000000040100000000000026370 0ustar00zuulzuul00000000000000{ "status": "pending", "created_at": "2013-11-26T07:21:21Z", "updated_at": "2013-11-26T07:21:21Z", "image_id": "0ae74cc5-5147-4239-9ce2-b0c580f7067e", "member_id": "8989447062e04a818baf9e073fd04fa7", "schema": "/v2/schemas/member" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-member-update-request.json0000664000175000017500000000003500000000000026062 0ustar00zuulzuul00000000000000{ "status": "accepted" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-member-update-response.json0000664000175000017500000000040200000000000026226 0ustar00zuulzuul00000000000000{ "created_at": "2013-09-20T19:22:19Z", "image_id": "a96be11e-8536-4910-92cb-de50aa19dfe6", "member_id": "8989447062e04a818baf9e073fd04fa7", "schema": "/v2/schemas/member", "status": "accepted", "updated_at": "2013-09-20T20:15:31Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-members-list-response.json0000664000175000017500000000122600000000000026107 0ustar00zuulzuul00000000000000{ "members": [ { "created_at": "2013-10-07T17:58:03Z", "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe", "member_id": "123456789", "schema": "/v2/schemas/member", "status": "pending", "updated_at": "2013-10-07T17:58:03Z" }, { "created_at": "2013-10-07T17:58:55Z", "image_id": "dbc999e3-c52f-4200-bedd-3b18fe7f87fe", "member_id": "987654321", "schema": "/v2/schemas/member", "status": "accepted", "updated_at": "2013-10-08T12:08:55Z" } ], "schema": "/v2/schemas/members" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-show-response.json0000664000175000017500000000157000000000000024466 0ustar00zuulzuul00000000000000{ "status": "active", "name": "cirros-0.3.2-x86_64-disk", "tags": [], "container_format": "bare", "created_at": "2014-05-05T17:15:10Z", "disk_format": "qcow2", "updated_at": "2014-05-05T17:15:11Z", "visibility": "public", "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", "min_disk": 0, "protected": false, "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", "checksum": "64d7c1cd2b6f60c92c14662941cb7913", "os_hash_algo": "sha512", "os_hash_value": 
"073b4523583784fbe01daff81eba092a262ec37ba6d04dd3f52e4cd5c93eb8258af44881345ecda0e49f3d8cc6d2df6b050ff3e72681d723234aff9d17d0cf09", "os_hidden": false, "owner": "5ef70662f8b34079a6eddb8da9d75fe8", "size": 13167616, "min_ram": 0, "schema": "/v2/schemas/image", "virtual_size": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-tasks-show-response.json0000664000175000017500000000173400000000000025613 0ustar00zuulzuul00000000000000{ "tasks": [ { "id": "ee22890e-8948-4ea6-9668-831f973c84f5", "image_id": "dddddddd-dddd-dddd-dddd-dddddddddddd", "request-id": "rrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr", "user": "uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu", "type": "api_image_import", "status": "processing", "owner": "64f0efc9955145aeb06f297a8a6fe402", "expires_at": null, "created_at": "2020-12-18T05:20:38.000000", "updated_at": "2020-12-18T05:25:39.000000", "deleted_at": null, "deleted": false, "input": { "image_id": "829c729b-ebc4-4cc7-a164-6f43f1149b17", "import_req": { "method": { "name": "copy-image" }, "all_stores": true, "all_stores_must_succeed": false }, "backend": [ "fast", "cheap", "slow", "reliable", "common" ] }, "result": null, "message": "Copied 15 MiB" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-update-request.json0000664000175000017500000000034500000000000024621 0ustar00zuulzuul00000000000000[ { "op": "replace", "path": "/name", "value": "Fedora 17" }, { "op": "replace", "path": "/tags", "value": [ "fedora", "beefy" ] } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/image-update-response.json0000664000175000017500000000161600000000000024771 0ustar00zuulzuul00000000000000{ "checksum": "710544e7f0c828b42f51207342622d33", "container_format": "ovf", "created_at": "2016-06-29T16:13:07Z", "disk_format": "vhd", "file": "/v2/images/2b61ed2b-f800-4da0-99ff-396b742b8646/file", "id": "2b61ed2b-f800-4da0-99ff-396b742b8646", "min_disk": 20, "min_ram": 512, "name": "Fedora 17", "owner": "02a7fb2dd4ef434c8a628c511dcbbeb6", "os_hash_algo": "sha512", "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e64549762ba94a1c44e92270cdbb69d7437dd1e101d00dd41684aaecccad1edc5c2e295e66d4733025b052497844", "os_hidden": false, "protected": false, "schema": "/v2/schemas/image", "self": "/v2/images/2b61ed2b-f800-4da0-99ff-396b742b8646", "size": 21909, "status": "active", "tags": [ "beefy", "fedora" ], "updated_at": "2016-07-25T14:48:18Z", "virtual_size": null, "visibility": "private" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/images-list-response.json0000664000175000017500000000430600000000000024644 0ustar00zuulzuul00000000000000{ "images": [ { "status": "active", "name": "cirros-0.3.2-x86_64-disk", "tags": [], "container_format": "bare", "created_at": "2014-11-07T17:07:06Z", "disk_format": "qcow2", "updated_at": "2014-11-07T17:19:09Z", "visibility": "public", "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", "min_disk": 0, "protected": false, "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", "checksum": "64d7c1cd2b6f60c92c14662941cb7913", "os_hash_algo": "sha512", "os_hash_value": 
"073b4523583784fbe01daff81eba092a262ec37ba6d04dd3f52e4cd5c93eb8258af44881345ecda0e49f3d8cc6d2df6b050ff3e72681d723234aff9d17d0cf09", "os_hidden": false, "owner": "5ef70662f8b34079a6eddb8da9d75fe8", "size": 13167616, "min_ram": 0, "schema": "/v2/schemas/image", "virtual_size": null }, { "status": "active", "name": "F17-x86_64-cfntools", "tags": [], "container_format": "bare", "created_at": "2014-10-30T08:23:39Z", "disk_format": "qcow2", "updated_at": "2014-11-03T16:40:10Z", "visibility": "public", "self": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c", "min_disk": 0, "protected": false, "id": "781b3762-9469-4cec-b58d-3349e5de4e9c", "file": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c/file", "checksum": "afab0f79bac770d61d24b4d0560b5f70", "os_hash_algo": "sha512", "os_hash_value": "ea3e20140df1cc65f53d4c5b9ee3b38d0d6868f61bbe2230417b0f98cef0e0c7c37f0ebc5c6456fa47f013de48b452617d56c15fdba25e100379bd0e81ee15ec", "os_hidden": false, "owner": "5ef70662f8b34079a6eddb8da9d75fe8", "size": 476704768, "min_ram": 0, "schema": "/v2/schemas/image", "virtual_size": null } ], "schema": "/v2/schemas/images", "first": "/v2/images" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/locations-list-detail-response.json0000664000175000017500000000023700000000000026631 0ustar00zuulzuul00000000000000[ { "url": "cinder://lvmdriver-1/39e6ffab-7502-4199-9609-416601615ca3", "metadata": { "store": "lvmdriver-1" } } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-create-request-simple.json0000664000175000017500000000033400000000000030344 0ustar00zuulzuul00000000000000{ "namespace": "FredCo::SomeCategory::Example", "display_name": "An Example Namespace", "description": "A metadata definitions namespace for example use.", "visibility": "public", "protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-create-request.json0000664000175000017500000000211600000000000027055 0ustar00zuulzuul00000000000000{ "description": "Choose capabilities that should be provided by the Compute Host. 
This provides the ability to fine tune the hardware specification required when a new vm is requested.", "display_name": "Hypervisor Selection", "namespace": "OS::Compute::Hypervisor", "properties": { "hypervisor_type": { "description": "The hypervisor type.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "title": "Hypervisor Type", "type": "string" }, "vm_mode": { "description": "The virtual machine mode.", "enum": [ "hvm", "xen", "uml", "exe" ], "title": "VM Mode", "type": "string" } }, "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "visibility": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-create-response-simple.json0000664000175000017500000000072600000000000030517 0ustar00zuulzuul00000000000000{ "created_at": "2016-05-19T16:05:48Z", "description": "A metadata definitions namespace for example use.", "display_name": "An Example Namespace", "namespace": "FredCo::SomeCategory::Example", "owner": "c60b1d57c5034e0d86902aedf8c49be0", "protected": true, "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/FredCo::SomeCategory::Example", "updated_at": "2016-05-19T16:05:48Z", "visibility": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-create-response.json0000664000175000017500000000227500000000000027231 0ustar00zuulzuul00000000000000{ "description": "Choose capabilities that should be provided by the Compute Host. This provides the ability to fine tune the hardware specification required when a new vm is requested.", "display_name": "Hypervisor Selection", "namespace": "OS::Compute::Hypervisor", "properties": { "hypervisor_type": { "description": "The hypervisor type.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "title": "Hypervisor Type", "type": "string" }, "vm_mode": { "description": "The virtual machine mode.", "enum": [ "hvm", "xen", "uml", "exe" ], "title": "VM Mode", "type": "string" } }, "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::Hypervisor", "visibility": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-details-response.json0000664000175000017500000000222500000000000027406 0ustar00zuulzuul00000000000000{ "created_at": "2016-06-28T14:57:10Z", "description": "The libvirt compute driver options.", "display_name": "libvirt Driver Options", "namespace": "OS::Compute::Libvirt", "owner": "admin", "properties": { "boot_menu": { "description": "If true, enables the BIOS bootmenu.", "enum": [ "true", "false" ], "title": "Boot Menu", "type": "string" }, "serial_port_count": { "description": "Specifies the count of serial ports.", "minimum": 0, "title": "Serial Port Count", "type": "integer" } }, "protected": true, "resource_type_associations": [ { "created_at": "2016-06-28T14:57:10Z", "name": "OS::Glance::Image", "prefix": "hw_" }, { "created_at": "2016-06-28T14:57:10Z", "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt", "visibility": "public" } 
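The namespace samples above can be exercised with a single authenticated ``POST`` to ``/v2/metadefs/namespaces``. A minimal sketch, reusing the body of ``metadef-namespace-create-request-simple.json``; the endpoint and token are placeholder assumptions:

.. code-block:: python

   # Sketch only: create the example namespace, then unprotect it so it
   # can later be deleted (protected namespaces reject deletion).
   import requests

   GLANCE = "http://controller:9292"            # assumed endpoint
   HEADERS = {"X-Auth-Token": "<token>",
              "Content-Type": "application/json"}

   ns = {
       "namespace": "FredCo::SomeCategory::Example",
       "display_name": "An Example Namespace",
       "description": "A metadata definitions namespace for example use.",
       "visibility": "public",
       "protected": True,
   }
   resp = requests.post(f"{GLANCE}/v2/metadefs/namespaces",
                        headers=HEADERS, json=ns)
   resp.raise_for_status()                      # expect 201 Created
   print(resp.json()["self"])

   # PUT /v2/metadefs/namespaces/{namespace} replaces the record;
   # flipping "protected" to false mirrors
   # metadef-namespace-update-request.json above.
   ns["protected"] = False
   requests.put(f"{GLANCE}/v2/metadefs/namespaces/{ns['namespace']}",
                headers=HEADERS, json=ns).raise_for_status()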
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-details-with-rt-response.json0000664000175000017500000000223300000000000031001 0ustar00zuulzuul00000000000000{ "created_at": "2016-06-28T14:57:10Z", "description": "The libvirt compute driver options.", "display_name": "libvirt Driver Options", "namespace": "OS::Compute::Libvirt", "owner": "admin", "properties": { "hw_boot_menu": { "description": "If true, enables the BIOS bootmenu.", "enum": [ "true", "false" ], "title": "Boot Menu", "type": "string" }, "hw_serial_port_count": { "description": "Specifies the count of serial ports.", "minimum": 0, "title": "Serial Port Count", "type": "integer" } }, "protected": true, "resource_type_associations": [ { "created_at": "2016-06-28T14:57:10Z", "name": "OS::Glance::Image", "prefix": "hw_" }, { "created_at": "2016-06-28T14:57:10Z", "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt", "visibility": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-update-request.json0000664000175000017500000000051500000000000027075 0ustar00zuulzuul00000000000000{ "description": "Choose capabilities that should be provided by the Compute Host. This provides the ability to fine tune the hardware specification required when a new vm is requested.", "display_name": "Hypervisor Selection", "namespace": "OS::Compute::Hypervisor", "protected": false, "visibility": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespace-update-response.json0000664000175000017500000000110100000000000027233 0ustar00zuulzuul00000000000000{ "created_at": "2014-09-19T13:31:37Z", "description": "Choose capabilities that should be provided by the Compute Host. This provides the ability to fine tune the hardware specification required when a new vm is requested.", "display_name": "Hypervisor Selection", "namespace": "OS::Compute::Hypervisor", "owner": "7ec22942411e427692e8a3436be1031a", "protected": false, "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::Hypervisor", "updated_at": "2014-09-19T13:31:37Z", "visibility": "public" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-namespaces-list-response.json0000664000175000017500000000705500000000000027125 0ustar00zuulzuul00000000000000{ "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc", "namespaces": [ { "created_at": "2014-08-28T17:13:06Z", "description": "The libvirt compute driver options. These are properties specific to compute drivers. 
For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "display_name": "libvirt Driver Options", "namespace": "OS::Compute::Libvirt", "owner": "admin", "protected": true, "resource_type_associations": [ { "created_at": "2014-08-28T17:13:06Z", "name": "OS::Glance::Image", "updated_at": "2014-08-28T17:13:06Z" } ], "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt", "updated_at": "2014-08-28T17:13:06Z", "visibility": "public" }, { "created_at": "2014-08-28T17:13:06Z", "description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide-cloud/compute-flavors.html", "display_name": "Flavor Quota", "namespace": "OS::Compute::Quota", "owner": "admin", "protected": true, "resource_type_associations": [ { "created_at": "2014-08-28T17:13:06Z", "name": "OS::Nova::Flavor", "updated_at": "2014-08-28T17:13:06Z" } ], "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::Quota", "updated_at": "2014-08-28T17:13:06Z", "visibility": "public" }, { "created_at": "2014-08-28T17:13:06Z", "description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. See also: https://opendev.org/openstack/nova-specs/src/branch/master/specs/juno/implemented/virt-driver-vcpu-topology.rst", "display_name": "Virtual CPU Topology", "namespace": "OS::Compute::VirtCPUTopology", "owner": "admin", "protected": true, "resource_type_associations": [ { "created_at": "2014-08-28T17:13:06Z", "name": "OS::Glance::Image", "prefix": "hw_", "updated_at": "2014-08-28T17:13:06Z" }, { "created_at": "2014-08-28T17:13:06Z", "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image", "updated_at": "2014-08-28T17:13:06Z" }, { "created_at": "2014-08-28T17:13:06Z", "name": "OS::Nova::Flavor", "prefix": "hw:", "updated_at": "2014-08-28T17:13:06Z" } ], "schema": "/v2/schemas/metadefs/namespace", "self": "/v2/metadefs/namespaces/OS::Compute::VirtCPUTopology", "updated_at": "2014-08-28T17:13:06Z", "visibility": "public" } ], "schema": "/v2/schemas/metadefs/namespaces" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-object-create-request.json0000664000175000017500000000314200000000000026367 0ustar00zuulzuul00000000000000{ "description": "You can configure the CPU limits with control parameters.", "name": "CPU Limits", "properties": { "quota:cpu_period": { "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", "maximum": 1000000, "minimum": 1000, "title": "Quota: CPU Period", "type": "integer" }, "quota:cpu_quota": { "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. 
You can use this feature to ensure that all vCPUs run at the same speed.", "title": "Quota: CPU Quota", "type": "integer" }, "quota:cpu_shares": { "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "title": "Quota: CPU Shares", "type": "integer" } }, "required": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-object-create-response.json0000664000175000017500000000346000000000000026540 0ustar00zuulzuul00000000000000{ "created_at": "2014-09-19T18:20:56Z", "description": "You can configure the CPU limits with control parameters.", "name": "CPU Limits", "properties": { "quota:cpu_period": { "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", "maximum": 1000000, "minimum": 1000, "title": "Quota: CPU Period", "type": "integer" }, "quota:cpu_quota": { "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", "title": "Quota: CPU Quota", "type": "integer" }, "quota:cpu_shares": { "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "title": "Quota: CPU Shares", "type": "integer" } }, "required": [], "schema": "/v2/schemas/metadefs/object", "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits", "updated_at": "2014-09-19T18:20:56Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-object-details-response.json0000664000175000017500000000346000000000000026722 0ustar00zuulzuul00000000000000{ "created_at": "2014-09-19T18:20:56Z", "description": "You can configure the CPU limits with control parameters.", "name": "CPU Limits", "properties": { "quota:cpu_period": { "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", "maximum": 1000000, "minimum": 1000, "title": "Quota: CPU Period", "type": "integer" }, "quota:cpu_quota": { "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. 
The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", "title": "Quota: CPU Quota", "type": "integer" }, "quota:cpu_shares": { "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "title": "Quota: CPU Shares", "type": "integer" } }, "required": [], "schema": "/v2/schemas/metadefs/object", "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits", "updated_at": "2014-09-19T18:20:56Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-object-update-request.json0000664000175000017500000000117300000000000026410 0ustar00zuulzuul00000000000000{ "description": "You can configure the CPU limits with control parameters.", "name": "CPU Limits", "properties": { "quota:cpu_shares": { "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "title": "Quota: CPU Shares", "type": "integer" } }, "required": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-object-update-response.json0000664000175000017500000000151100000000000026552 0ustar00zuulzuul00000000000000{ "created_at": "2014-09-19T19:20:56Z", "description": "You can configure the CPU limits with control parameters.", "name": "CPU Limits", "properties": { "quota:cpu_shares": { "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "title": "Quota: CPU Shares", "type": "integer" } }, "required": [], "schema": "/v2/schemas/metadefs/object", "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits", "updated_at": "2014-09-19T19:20:56Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-objects-list-response.json0000664000175000017500000001645500000000000026443 0ustar00zuulzuul00000000000000{ "objects": [ { "created_at": "2014-09-18T18:16:35Z", "description": "You can configure the CPU limits with control parameters.", "name": "CPU Limits", "properties": { "quota:cpu_period": { "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. 
A period with value 0 means no value.", "maximum": 1000000, "minimum": 1000, "title": "Quota: CPU Period", "type": "integer" }, "quota:cpu_quota": { "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", "title": "Quota: CPU Quota", "type": "integer" }, "quota:cpu_shares": { "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "title": "Quota: CPU Shares", "type": "integer" } }, "required": [], "schema": "/v2/schemas/metadefs/object", "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/CPU Limits" }, { "created_at": "2014-09-18T18:16:35Z", "description": "Using disk I/O quotas, you can set maximum disk write to 10 MB per second for a VM user.", "name": "Disk QoS", "properties": { "quota:disk_read_bytes_sec": { "description": "Sets disk I/O quota for disk read bytes / sec.", "title": "Quota: Disk read bytes / sec", "type": "integer" }, "quota:disk_read_iops_sec": { "description": "Sets disk I/O quota for disk read IOPS / sec.", "title": "Quota: Disk read IOPS / sec", "type": "integer" }, "quota:disk_total_bytes_sec": { "description": "Sets disk I/O quota for total disk bytes / sec.", "title": "Quota: Disk Total Bytes / sec", "type": "integer" }, "quota:disk_total_iops_sec": { "description": "Sets disk I/O quota for disk total IOPS / sec.", "title": "Quota: Disk Total IOPS / sec", "type": "integer" }, "quota:disk_write_bytes_sec": { "description": "Sets disk I/O quota for disk write bytes / sec.", "title": "Quota: Disk Write Bytes / sec", "type": "integer" }, "quota:disk_write_iops_sec": { "description": "Sets disk I/O quota for disk write IOPS / sec.", "title": "Quota: Disk Write IOPS / sec", "type": "integer" } }, "required": [], "schema": "/v2/schemas/metadefs/object", "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/Disk QoS" }, { "created_at": "2014-09-18T18:16:35Z", "description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). VIFs are typically owned and managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state.
When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.", "name": "Virtual Interface QoS", "properties": { "quota:vif_inbound_average": { "description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", "title": "Quota: VIF Inbound Average", "type": "integer" }, "quota:vif_inbound_burst": { "description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", "title": "Quota: VIF Inbound Burst", "type": "integer" }, "quota:vif_inbound_peak": { "description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. Specifies maximum rate at which an interface can receive data.", "title": "Quota: VIF Inbound Peak", "type": "integer" }, "quota:vif_outbound_average": { "description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", "title": "Quota: VIF Outbound Average", "type": "integer" }, "quota:vif_outbound_burst": { "description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", "title": "Quota: VIF Outbound Burst", "type": "integer" }, "quota:vif_outbound_peak": { "description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. Specifies maximum rate at which an interface can send data.", "title": "Quota: VIF Outbound Peak", "type": "integer" } }, "required": [], "schema": "/v2/schemas/metadefs/object", "self": "/v2/metadefs/namespaces/OS::Compute::Quota/objects/Virtual Interface QoS" } ], "schema": "/v2/schemas/metadefs/objects" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-properties-list-response.json0000664000175000017500000001044300000000000027175 0ustar00zuulzuul00000000000000{ "properties": { "hw_disk_bus": { "description": "Specifies the type of disk controller to attach disk devices to.", "enum": [ "scsi", "virtio", "uml", "xen", "ide", "usb", "fdc", "sata" ], "title": "Disk Bus", "type": "string" }, "hw_machine_type": { "description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).", "title": "Machine Type", "type": "string" }, "hw_qemu_guest_agent": { "description": "It is a daemon program running inside the domain which is supposed to help management applications with executing functions which need assistance of the guest OS. For example, freezing and thawing filesystems, entering suspend. However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.", "enum": [ "yes", "no" ], "title": "QEMU Guest Agent", "type": "string" }, "hw_rng_model": { "default": "virtio", "description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: The generator device is disabled. /dev/random is used as the default entropy source.
To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng", "title": "Random Number Generator Device", "type": "string" }, "hw_scsi_model": { "default": "virtio-scsi", "description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.", "title": "SCSI Model", "type": "string" }, "hw_video_model": { "description": "The video image driver used.", "enum": [ "vga", "cirrus", "vmvga", "xen", "qxl" ], "title": "Video Model", "type": "string" }, "hw_video_ram": { "description": "Maximum RAM for the video image. Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.", "title": "Max Video Ram", "type": "integer" }, "hw_vif_model": { "description": "Specifies the model of virtual network interface device to use. The valid options depend on the configured hypervisor. KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, virtio, e1000e and vmxnet3. VMware: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, and VirtualVmxnet. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.", "enum": [ "e1000", "ne2k_pci", "pcnet", "rtl8139", "virtio", "e1000e", "vmxnet3", "VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "netfront", "ne2k_pci" ], "title": "Virtual Network Interface", "type": "string" }, "os_command_line": { "description": "The kernel command line to be used by the libvirt driver, instead of the default. For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).", "title": "Kernel Command Line", "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-property-create-request.json0000664000175000017500000000110100000000000026776 0ustar00zuulzuul00000000000000{ "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "name": "hypervisor_type", "title": "Hypervisor Type", "type": "string" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-property-create-response.json0000664000175000017500000000110100000000000027144 0ustar00zuulzuul00000000000000{ "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. 
Image properties are contained in the image dictionary in the request_spec.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "name": "hypervisor_type", "title": "Hypervisor Type", "type": "string" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-property-details-response.json0000664000175000017500000000110100000000000027326 0ustar00zuulzuul00000000000000{ "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "name": "hypervisor_type", "title": "Hypervisor Type", "type": "string" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-property-update-request.json0000664000175000017500000000110100000000000027015 0ustar00zuulzuul00000000000000{ "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "name": "hypervisor_type", "title": "Hypervisor Type", "type": "string" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-property-update-response.json0000664000175000017500000000110100000000000027163 0ustar00zuulzuul00000000000000{ "description": "The hypervisor type. It may be used by the host properties filter for scheduling. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. 
Image properties are contained in the image dictionary in the request_spec.", "enum": [ "xen", "qemu", "kvm", "lxc", "uml", "vmware", "hyperv" ], "name": "hypervisor_type", "title": "Hypervisor Type", "type": "string" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-resource-type-assoc-create-response.json0000664000175000017500000000026000000000000031201 0ustar00zuulzuul00000000000000{ "created_at": "2014-09-19T16:09:13Z", "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image", "updated_at": "2014-09-19T16:09:13Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-resource-type-assoc-list-response.json0000664000175000017500000000050400000000000030712 0ustar00zuulzuul00000000000000{ "resource_type_associations": [ { "created_at": "2018-03-05T18:20:44Z", "name": "OS::Nova::Flavor", "prefix": "hw:" }, { "created_at": "2018-03-05T18:20:44Z", "name": "OS::Glance::Image", "prefix": "hw_" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-resource-type-create-request.json0000664000175000017500000000013400000000000027725 0ustar00zuulzuul00000000000000{ "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-resource-types-list-response.json0000664000175000017500000000151100000000000027766 0ustar00zuulzuul00000000000000{ "resource_types": [ { "created_at": "2014-08-28T18:13:04Z", "name": "OS::Glance::Image", "updated_at": "2014-08-28T18:13:04Z" }, { "created_at": "2014-08-28T18:13:04Z", "name": "OS::Cinder::Volume", "updated_at": "2014-08-28T18:13:04Z" }, { "created_at": "2014-08-28T18:13:04Z", "name": "OS::Nova::Flavor", "updated_at": "2014-08-28T18:13:04Z" }, { "created_at": "2014-08-28T18:13:04Z", "name": "OS::Nova::Aggregate", "updated_at": "2014-08-28T18:13:04Z" }, { "created_at": "2014-08-28T18:13:04Z", "name": "OS::Nova::Instance", "updated_at": "2014-08-28T18:13:04Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-tag-create-response.json0000664000175000017500000000016700000000000026046 0ustar00zuulzuul00000000000000{ "created_at": "2015-05-09T01:12:31Z", "name": "added-sample-tag", "updated_at": "2015-05-09T01:12:31Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-tag-details-response.json0000664000175000017500000000016200000000000026223 0ustar00zuulzuul00000000000000{ "created_at": "2015-05-06T23:16:12Z", "name": "sample-tag2", "updated_at": "2015-05-06T23:16:12Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-tag-update-request.json0000664000175000017500000000003700000000000025713 0ustar00zuulzuul00000000000000{ "name": "new-tag-name" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
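The resource-type samples above map onto the association calls documented in ``metadefs-resourcetypes.inc``: ``POST`` creates an association in a namespace and ``DELETE`` removes it (only while the namespace is unprotected). A minimal sketch, with a placeholder endpoint, token, and namespace name:

.. code-block:: python

   # Sketch only: associate OS::Cinder::Volume with a namespace using
   # the body from metadef-resource-type-create-request.json, then
   # remove the association again.
   import requests

   GLANCE = "http://controller:9292"            # assumed endpoint
   HEADERS = {"X-Auth-Token": "<token>",
              "Content-Type": "application/json"}
   NS = "OS::Compute::VirtCPUTopology"          # assumed existing namespace

   assoc = {
       "name": "OS::Cinder::Volume",
       "prefix": "hw_",
       "properties_target": "image",
   }
   resp = requests.post(f"{GLANCE}/v2/metadefs/namespaces/{NS}/resource_types",
                        headers=HEADERS, json=assoc)
   resp.raise_for_status()    # 201; body matches the assoc-create sample

   # DELETE returns 204 on success, or 403 if the namespace is protected.
   requests.delete(
       f"{GLANCE}/v2/metadefs/namespaces/{NS}/resource_types/OS::Cinder::Volume",
       headers=HEADERS).raise_for_status()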
glance-29.0.0/api-ref/source/v2/samples/metadef-tag-update-response.json0000664000175000017500000000016300000000000026061 0ustar00zuulzuul00000000000000{ "created_at": "2016-05-21T18:49:38Z", "name": "new-tag-name", "updated_at": "2016-05-21T19:04:22Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-tags-create-request.json0000664000175000017500000000027400000000000026062 0ustar00zuulzuul00000000000000{ "tags": [ { "name": "sample-tag1" }, { "name": "sample-tag2" }, { "name": "sample-tag3" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-tags-create-response.json0000664000175000017500000000027400000000000026230 0ustar00zuulzuul00000000000000{ "tags": [ { "name": "sample-tag1" }, { "name": "sample-tag2" }, { "name": "sample-tag3" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/metadef-tags-list-response.json0000664000175000017500000000027400000000000025740 0ustar00zuulzuul00000000000000{ "tags": [ { "name": "sample-tag1" }, { "name": "sample-tag2" }, { "name": "sample-tag3" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-image-member-show-response.json0000664000175000017500000000201100000000000027343 0ustar00zuulzuul00000000000000{ "name": "member", "properties": { "created_at": { "description": "Date and time of image member creation", "type": "string" }, "image_id": { "description": "An identifier for the image", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": "string" }, "member_id": { "description": "An identifier for the image member (tenantId)", "type": "string" }, "schema": { "readOnly": true, "type": "string" }, "status": { "description": "The status of this image member", "enum": [ "pending", "accepted", "rejected" ], "type": "string" }, "updated_at": { "description": "Date and time of last modification of image member", "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-image-members-list-response.json0000664000175000017500000000331600000000000027532 0ustar00zuulzuul00000000000000{ "links": [ { "href": "{schema}", "rel": "describedby" } ], "name": "members", "properties": { "members": { "items": { "name": "member", "properties": { "created_at": { "description": "Date and time of image member creation", "type": "string" }, "image_id": { "description": "An identifier for the image", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": "string" }, "member_id": { "description": "An identifier for the image member (tenantId)", "type": "string" }, "schema": { "readOnly": true, "type": "string" }, "status": { "description": "The status of this image member", "enum": [ "pending", "accepted", "rejected" ], "type": "string" }, "updated_at": { "description": "Date and time of last modification of image member", "type": "string" } } }, "type": "array" }, "schema": { "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
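Because each ``/v2/schemas/...`` document is authoritative for the corresponding responses, a client can validate payloads such as the member samples above against the schema it fetches. A minimal sketch using the third-party ``jsonschema`` package; the endpoint and token are placeholder assumptions:

.. code-block:: python

   # Sketch only: fetch the image-member schema and validate a member
   # record (shaped like image-member-create-response.json) against it.
   import jsonschema
   import requests

   GLANCE = "http://controller:9292"            # assumed endpoint
   HEADERS = {"X-Auth-Token": "<token>"}

   schema = requests.get(f"{GLANCE}/v2/schemas/member",
                         headers=HEADERS).json()

   member = {
       "created_at": "2013-09-20T19:22:19Z",
       "image_id": "a96be11e-8536-4910-92cb-de50aa19dfe6",
       "member_id": "8989447062e04a818baf9e073fd04fa7",
       "schema": "/v2/schemas/member",
       "status": "pending",
       "updated_at": "2013-09-20T19:25:31Z",
   }
   # Raises jsonschema.ValidationError if the record does not conform.
   jsonschema.validate(instance=member, schema=schema)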
glance-29.0.0/api-ref/source/v2/samples/schemas-image-show-response.json0000664000175000017500000001724100000000000026111 0ustar00zuulzuul00000000000000{ "additionalProperties": { "type": "string" }, "links": [ { "href": "{self}", "rel": "self" }, { "href": "{file}", "rel": "enclosure" }, { "href": "{schema}", "rel": "describedby" } ], "name": "image", "properties": { "architecture": { "description": "Operating system architecture as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html", "is_base": false, "type": "string" }, "checksum": { "description": "md5 hash of image contents.", "maxLength": 32, "readOnly": true, "type": [ "null", "string" ] }, "container_format": { "description": "Format of the container", "enum": [ null, "ami", "ari", "aki", "bare", "ovf", "ova", "docker", "compressed" ], "type": [ "null", "string" ] }, "created_at": { "description": "Date and time of image registration", "readOnly": true, "type": "string" }, "direct_url": { "description": "URL to access the image file kept in external store", "readOnly": true, "type": "string" }, "disk_format": { "description": "Format of the disk", "enum": [ null, "ami", "ari", "aki", "vhd", "vhdx", "vmdk", "raw", "qcow2", "vdi", "iso", "ploop" ], "type": [ "null", "string" ] }, "file": { "description": "An image file url", "readOnly": true, "type": "string" }, "id": { "description": "An identifier for the image", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": "string" }, "instance_uuid": { "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)", "is_base": false, "type": "string" }, "kernel_id": { "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image.", "is_base": false, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": [ "null", "string" ] }, "locations": { "description": "A set of URLs to access the image file kept in external store", "items": { "properties": { "metadata": { "type": "object" }, "url": { "maxLength": 255, "type": "string" } }, "required": [ "url", "metadata" ], "type": "object" }, "type": "array" }, "min_disk": { "description": "Amount of disk space (in GB) required to boot image.", "type": "integer" }, "min_ram": { "description": "Amount of ram (in MB) required to boot image.", "type": "integer" }, "name": { "description": "Descriptive name for the image", "maxLength": 255, "type": [ "null", "string" ] }, "os_distro": { "description": "Common name of operating system distribution as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html", "is_base": false, "type": "string" }, "os_hash_algo": { "description": "Algorithm to calculate the os_hash_value", "maxLength": 64, "readOnly": true, "type": [ "null", "string" ] }, "os_hash_value": { "description": "Hexdigest of the image contents using the algorithm specified by the os_hash_algo", "maxLength": 128, "readOnly": true, "type": [ "null", "string" ] }, "os_hidden": { "description": "If true, image will not appear in default image list response.", "type": "boolean" }, "os_version": { "description": "Operating system version as specified by the distributor", "is_base": false, "type": "string" }, "owner": { "description": "Owner of the image", "maxLength": 255, "type": [ "null", "string" ] }, "protected": { 
"description": "If true, image will not be deletable.", "type": "boolean" }, "ramdisk_id": { "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image.", "is_base": false, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": [ "null", "string" ] }, "schema": { "description": "An image schema url", "readOnly": true, "type": "string" }, "self": { "description": "An image self url", "readOnly": true, "type": "string" }, "size": { "description": "Size of image file in bytes", "readOnly": true, "type": [ "null", "integer" ] }, "status": { "description": "Status of the image", "enum": [ "queued", "saving", "active", "killed", "deleted", "pending_delete", "deactivated", "uploading", "importing" ], "readOnly": true, "type": "string" }, "tags": { "description": "List of strings related to the image", "items": { "maxLength": 255, "type": "string" }, "type": "array" }, "updated_at": { "description": "Date and time of the last image modification", "readOnly": true, "type": "string" }, "virtual_size": { "description": "Virtual size of image in bytes", "readOnly": true, "type": [ "null", "integer" ] }, "visibility": { "description": "Scope of image accessibility", "enum": [ "public", "private" ], "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-images-list-response.json0000664000175000017500000002641500000000000026272 0ustar00zuulzuul00000000000000{ "links": [ { "href": "{first}", "rel": "first" }, { "href": "{next}", "rel": "next" }, { "href": "{schema}", "rel": "describedby" } ], "name": "images", "properties": { "first": { "type": "string" }, "images": { "items": { "additionalProperties": { "type": "string" }, "links": [ { "href": "{self}", "rel": "self" }, { "href": "{file}", "rel": "enclosure" }, { "href": "{schema}", "rel": "describedby" } ], "name": "image", "properties": { "architecture": { "description": "Operating system architecture as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html", "is_base": false, "type": "string" }, "checksum": { "description": "md5 hash of image contents.", "maxLength": 32, "readOnly": true, "type": [ "null", "string" ] }, "container_format": { "description": "Format of the container", "enum": [ null, "ami", "ari", "aki", "bare", "ovf", "ova", "docker", "compressed" ], "type": [ "null", "string" ] }, "created_at": { "description": "Date and time of image registration", "readOnly": true, "type": "string" }, "direct_url": { "description": "URL to access the image file kept in external store", "readOnly": true, "type": "string" }, "disk_format": { "description": "Format of the disk", "enum": [ null, "ami", "ari", "aki", "vhd", "vhdx", "vmdk", "raw", "qcow2", "vdi", "iso", "ploop" ], "type": [ "null", "string" ] }, "file": { "description": "An image file url", "readOnly": true, "type": "string" }, "id": { "description": "An identifier for the image", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": "string" }, "instance_uuid": { "description": "Metadata which can be used to record which instance this image is associated with. 
(Informational only, does not create an instance snapshot.)", "is_base": false, "type": "string" }, "kernel_id": { "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image.", "is_base": false, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": [ "null", "string" ] }, "locations": { "description": "A set of URLs to access the image file kept in external store", "items": { "properties": { "metadata": { "type": "object" }, "url": { "maxLength": 255, "type": "string" } }, "required": [ "url", "metadata" ], "type": "object" }, "type": "array" }, "min_disk": { "description": "Amount of disk space (in GB) required to boot image.", "type": "integer" }, "min_ram": { "description": "Amount of ram (in MB) required to boot image.", "type": "integer" }, "name": { "description": "Descriptive name for the image", "maxLength": 255, "type": [ "null", "string" ] }, "os_distro": { "description": "Common name of operating system distribution as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html", "is_base": false, "type": "string" }, "os_hash_algo": { "description": "Algorithm to calculate the os_hash_value", "maxLength": 64, "readOnly": true, "type": [ "null", "string" ] }, "os_hash_value": { "description": "Hexdigest of the image contents using the algorithm specified by the os_hash_algo", "maxLength": 128, "readOnly": true, "type": [ "null", "string" ] }, "os_hidden": { "description": "If true, image will not appear in default image list response.", "type": "boolean" }, "os_version": { "description": "Operating system version as specified by the distributor", "is_base": false, "type": "string" }, "owner": { "description": "Owner of the image", "maxLength": 255, "type": [ "null", "string" ] }, "protected": { "description": "If true, image will not be deletable.", "type": "boolean" }, "ramdisk_id": { "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image.", "is_base": false, "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": [ "null", "string" ] }, "schema": { "description": "An image schema url", "readOnly": true, "type": "string" }, "self": { "description": "An image self url", "readOnly": true, "type": "string" }, "size": { "description": "Size of image file in bytes", "readOnly": true, "type": [ "null", "integer" ] }, "status": { "description": "Status of the image", "enum": [ "queued", "saving", "active", "killed", "deleted", "pending_delete", "deactivated", "uploading", "importing" ], "readOnly": true, "type": "string" }, "tags": { "description": "List of strings related to the image", "items": { "maxLength": 255, "type": "string" }, "type": "array" }, "updated_at": { "description": "Date and time of the last image modification", "readOnly": true, "type": "string" }, "virtual_size": { "description": "Virtual size of image in bytes", "readOnly": true, "type": [ "null", "integer" ] }, "visibility": { "description": "Scope of image accessibility", "enum": [ "public", "private" ], "type": "string" } } }, "type": "array" }, "next": { "type": "string" }, "schema": { "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-namespace-show-response.json0000664000175000017500000001605200000000000030365 
0ustar00zuulzuul00000000000000{ "additionalProperties": false, "definitions": { "positiveInteger": { "minimum": 0, "type": "integer" }, "positiveIntegerDefault0": { "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "property": { "additionalProperties": { "properties": { "additionalItems": { "type": "boolean" }, "default": {}, "description": { "type": "string" }, "enum": { "type": "array" }, "items": { "properties": { "enum": { "type": "array" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" } }, "type": "object" }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "maximum": { "type": "number" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minimum": { "type": "number" }, "name": { "maxLength": 255, "type": "string" }, "operators": { "items": { "type": "string" }, "type": "array" }, "pattern": { "format": "regex", "type": "string" }, "readonly": { "type": "boolean" }, "required": { "$ref": "#/definitions/stringArray" }, "title": { "type": "string" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" }, "uniqueItems": { "default": false, "type": "boolean" } }, "required": [ "title", "type" ], "type": "object" }, "type": "object" }, "stringArray": { "items": { "type": "string" }, "type": "array", "uniqueItems": true } }, "name": "namespace", "properties": { "created_at": { "description": "Date and time of namespace creation", "format": "date-time", "readOnly": true, "type": "string" }, "description": { "description": "Provides a user friendly description of the namespace.", "maxLength": 500, "type": "string" }, "display_name": { "description": "The user friendly name for the namespace. 
Used by UI if available.", "maxLength": 80, "type": "string" }, "namespace": { "description": "The unique namespace text.", "maxLength": 80, "type": "string" }, "objects": { "items": { "properties": { "description": { "type": "string" }, "name": { "type": "string" }, "properties": { "$ref": "#/definitions/property" }, "required": { "$ref": "#/definitions/stringArray" } }, "type": "object" }, "type": "array" }, "owner": { "description": "Owner of the namespace.", "maxLength": 255, "type": "string" }, "properties": { "$ref": "#/definitions/property" }, "protected": { "description": "If true, namespace will not be deletable.", "type": "boolean" }, "resource_type_associations": { "items": { "properties": { "name": { "type": "string" }, "prefix": { "type": "string" }, "properties_target": { "type": "string" } }, "type": "object" }, "type": "array" }, "schema": { "readOnly": true, "type": "string" }, "self": { "readOnly": true, "type": "string" }, "tags": { "items": { "properties": { "name": { "type": "string" } }, "type": "object" }, "type": "array" }, "updated_at": { "description": "Date and time of the last namespace modification", "format": "date-time", "readOnly": true, "type": "string" }, "visibility": { "description": "Scope of namespace accessibility.", "enum": [ "public", "private" ], "type": "string" } }, "required": [ "namespace" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-namespaces-list-response.json0000664000175000017500000002164200000000000030544 0ustar00zuulzuul00000000000000{ "definitions": { "positiveInteger": { "minimum": 0, "type": "integer" }, "positiveIntegerDefault0": { "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "property": { "additionalProperties": { "properties": { "additionalItems": { "type": "boolean" }, "default": {}, "description": { "type": "string" }, "enum": { "type": "array" }, "items": { "properties": { "enum": { "type": "array" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" } }, "type": "object" }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "maximum": { "type": "number" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minimum": { "type": "number" }, "name": { "maxLength": 255, "type": "string" }, "operators": { "items": { "type": "string" }, "type": "array" }, "pattern": { "format": "regex", "type": "string" }, "readonly": { "type": "boolean" }, "required": { "$ref": "#/definitions/stringArray" }, "title": { "type": "string" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" }, "uniqueItems": { "default": false, "type": "boolean" } }, "required": [ "title", "type" ], "type": "object" }, "type": "object" }, "stringArray": { "items": { "type": "string" }, "type": "array", "uniqueItems": true } }, "links": [ { "href": "{first}", "rel": "first" }, { "href": "{next}", "rel": "next" }, { "href": "{schema}", "rel": "describedby" } ], "name": "namespaces", "properties": { "first": { "type": "string" }, "namespaces": { "items": { "additionalProperties": false, "name": "namespace", "properties": { "created_at": { "description": "Date and time of namespace creation", "format": "date-time", "readOnly": true, "type": "string" }, "description": { 
"description": "Provides a user friendly description of the namespace.", "maxLength": 500, "type": "string" }, "display_name": { "description": "The user friendly name for the namespace. Used by UI if available.", "maxLength": 80, "type": "string" }, "namespace": { "description": "The unique namespace text.", "maxLength": 80, "type": "string" }, "objects": { "items": { "properties": { "description": { "type": "string" }, "name": { "type": "string" }, "properties": { "$ref": "#/definitions/property" }, "required": { "$ref": "#/definitions/stringArray" } }, "type": "object" }, "type": "array" }, "owner": { "description": "Owner of the namespace.", "maxLength": 255, "type": "string" }, "properties": { "$ref": "#/definitions/property" }, "protected": { "description": "If true, namespace will not be deletable.", "type": "boolean" }, "resource_type_associations": { "items": { "properties": { "name": { "type": "string" }, "prefix": { "type": "string" }, "properties_target": { "type": "string" } }, "type": "object" }, "type": "array" }, "schema": { "readOnly": true, "type": "string" }, "self": { "readOnly": true, "type": "string" }, "tags": { "items": { "properties": { "name": { "type": "string" } }, "type": "object" }, "type": "array" }, "updated_at": { "description": "Date and time of the last namespace modification", "format": "date-time", "readOnly": true, "type": "string" }, "visibility": { "description": "Scope of namespace accessibility.", "enum": [ "public", "private" ], "type": "string" } }, "required": [ "namespace" ] }, "type": "array" }, "next": { "type": "string" }, "schema": { "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-object-show-response.json0000664000175000017500000001165300000000000027701 0ustar00zuulzuul00000000000000{ "additionalProperties": false, "definitions": { "positiveInteger": { "minimum": 0, "type": "integer" }, "positiveIntegerDefault0": { "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "property": { "additionalProperties": { "properties": { "additionalItems": { "type": "boolean" }, "default": {}, "description": { "type": "string" }, "enum": { "type": "array" }, "items": { "properties": { "enum": { "type": "array" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" } }, "type": "object" }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "maximum": { "type": "number" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minimum": { "type": "number" }, "name": { "maxLength": 255, "type": "string" }, "operators": { "items": { "type": "string" }, "type": "array" }, "pattern": { "format": "regex", "type": "string" }, "readonly": { "type": "boolean" }, "required": { "$ref": "#/definitions/stringArray" }, "title": { "type": "string" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" }, "uniqueItems": { "default": false, "type": "boolean" } }, "required": [ "title", "type" ], "type": "object" }, "type": "object" }, "stringArray": { "items": { "type": "string" }, "type": "array", "uniqueItems": true } }, "name": "object", "properties": { "created_at": { "description": "Date and time of object creation", "format": "date-time", "readOnly": true, 
"type": "string" }, "description": { "type": "string" }, "name": { "maxLength": 255, "type": "string" }, "properties": { "$ref": "#/definitions/property" }, "required": { "$ref": "#/definitions/stringArray" }, "schema": { "readOnly": true, "type": "string" }, "self": { "readOnly": true, "type": "string" }, "updated_at": { "description": "Date and time of the last object modification", "format": "date-time", "readOnly": true, "type": "string" } }, "required": [ "name" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-objects-list-response.json0000664000175000017500000001372500000000000030061 0ustar00zuulzuul00000000000000{ "definitions": { "positiveInteger": { "minimum": 0, "type": "integer" }, "positiveIntegerDefault0": { "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "property": { "additionalProperties": { "properties": { "additionalItems": { "type": "boolean" }, "default": {}, "description": { "type": "string" }, "enum": { "type": "array" }, "items": { "properties": { "enum": { "type": "array" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" } }, "type": "object" }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "maximum": { "type": "number" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minimum": { "type": "number" }, "name": { "maxLength": 255, "type": "string" }, "operators": { "items": { "type": "string" }, "type": "array" }, "pattern": { "format": "regex", "type": "string" }, "readonly": { "type": "boolean" }, "required": { "$ref": "#/definitions/stringArray" }, "title": { "type": "string" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" }, "uniqueItems": { "default": false, "type": "boolean" } }, "required": [ "title", "type" ], "type": "object" }, "type": "object" }, "stringArray": { "items": { "type": "string" }, "type": "array", "uniqueItems": true } }, "links": [ { "href": "{first}", "rel": "first" }, { "href": "{next}", "rel": "next" }, { "href": "{schema}", "rel": "describedby" } ], "name": "objects", "properties": { "first": { "type": "string" }, "next": { "type": "string" }, "objects": { "items": { "additionalProperties": false, "name": "object", "properties": { "created_at": { "description": "Date and time of object creation", "format": "date-time", "readOnly": true, "type": "string" }, "description": { "type": "string" }, "name": { "maxLength": 255, "type": "string" }, "properties": { "$ref": "#/definitions/property" }, "required": { "$ref": "#/definitions/stringArray" }, "schema": { "readOnly": true, "type": "string" }, "self": { "readOnly": true, "type": "string" }, "updated_at": { "description": "Date and time of the last object modification", "format": "date-time", "readOnly": true, "type": "string" } }, "required": [ "name" ] }, "type": "array" }, "schema": { "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-properties-list-response.json0000664000175000017500000001076100000000000030621 0ustar00zuulzuul00000000000000{ "definitions": { "positiveInteger": { "minimum": 0, "type": "integer" }, "positiveIntegerDefault0": 
{ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "stringArray": { "items": { "type": "string" }, "minItems": 1, "type": "array", "uniqueItems": true } }, "links": [ { "href": "{first}", "rel": "first" }, { "href": "{next}", "rel": "next" }, { "href": "{schema}", "rel": "describedby" } ], "name": "properties", "properties": { "first": { "type": "string" }, "next": { "type": "string" }, "properties": { "additionalProperties": { "additionalProperties": false, "name": "property", "properties": { "additionalItems": { "type": "boolean" }, "default": {}, "description": { "type": "string" }, "enum": { "type": "array" }, "items": { "properties": { "enum": { "type": "array" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" } }, "type": "object" }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "maximum": { "type": "number" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minimum": { "type": "number" }, "name": { "maxLength": 255, "type": "string" }, "operators": { "items": { "type": "string" }, "type": "array" }, "pattern": { "format": "regex", "type": "string" }, "readonly": { "type": "boolean" }, "required": { "$ref": "#/definitions/stringArray" }, "title": { "type": "string" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" }, "uniqueItems": { "default": false, "type": "boolean" } }, "required": [ "type", "title" ] }, "type": "object" }, "schema": { "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-property-show-response.json0000664000175000017500000000545500000000000030322 0ustar00zuulzuul00000000000000{ "additionalProperties": false, "definitions": { "positiveInteger": { "minimum": 0, "type": "integer" }, "positiveIntegerDefault0": { "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] }, "stringArray": { "items": { "type": "string" }, "minItems": 1, "type": "array", "uniqueItems": true } }, "name": "property", "properties": { "additionalItems": { "type": "boolean" }, "default": {}, "description": { "type": "string" }, "enum": { "type": "array" }, "items": { "properties": { "enum": { "type": "array" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" } }, "type": "object" }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "maximum": { "type": "number" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "minimum": { "type": "number" }, "name": { "maxLength": 255, "type": "string" }, "operators": { "items": { "type": "string" }, "type": "array" }, "pattern": { "format": "regex", "type": "string" }, "readonly": { "type": "boolean" }, "required": { "$ref": "#/definitions/stringArray" }, "title": { "type": "string" }, "type": { "enum": [ "array", "boolean", "integer", "number", "object", "string", null ], "type": "string" }, "uniqueItems": { "default": false, "type": "boolean" } }, "required": [ "type", "title", "name" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-resource-type-association-show-response.json0000664000175000017500000000307100000000000033546 0ustar00zuulzuul00000000000000{ "additionalProperties": false, "name": "resource_type_association", "properties": { "created_at": { "description": "Date and time of resource type association", "format": "date-time", "readOnly": true, "type": "string" }, "name": { "description": "Resource type names should be aligned with Heat resource types whenever possible: https://docs.openstack.org/heat/latest/template_guide/openstack.html", "maxLength": 80, "type": "string" }, "prefix": { "description": "Specifies the prefix to use for the given resource type. Any properties in the namespace should be prefixed with this prefix when being applied to the specified resource type. Must include prefix separator (e.g. a colon :).", "maxLength": 80, "type": "string" }, "properties_target": { "description": "Some resource types allow more than one key / value pair per instance. For example, Cinder allows user and image metadata on volumes. Only the image properties metadata is evaluated by Nova (scheduling or drivers). This property allows a namespace target to remove the ambiguity.", "maxLength": 80, "type": "string" }, "updated_at": { "description": "Date and time of the last resource type association modification", "format": "date-time", "readOnly": true, "type": "string" } }, "required": [ "name" ] } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.json 22 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.jso0000664000175000017500000000510100000000000033542 0ustar00zuulzuul00000000000000{ "links": [ { "href": "{first}", "rel": "first" }, { "href": "{next}", "rel": "next" }, { "href": "{schema}", "rel": "describedby" } ], "name": "resource_type_associations", "properties": { "first": { "type": "string" }, "next": { "type": "string" }, "resource_type_associations": { "items": { "additionalProperties": false, "name": "resource_type_association", "properties": { "created_at": { "description": "Date and time of resource type association", "format": "date-time", "readOnly": true, "type": "string" }, "name": { "description": "Resource type names should be aligned with Heat resource types whenever possible: https://docs.openstack.org/heat/latest/template_guide/openstack.html", "maxLength": 80, "type": "string" }, "prefix": { "description": "Specifies the prefix to use for the given resource type. Any properties in the namespace should be prefixed with this prefix when being applied to the specified resource type. Must include prefix separator (e.g. a colon :).", "maxLength": 80, "type": "string" }, "properties_target": { "description": "Some resource types allow more than one key / value pair per instance. For example, Cinder allows user and image metadata on volumes. Only the image properties metadata is evaluated by Nova (scheduling or drivers). 
This property allows a namespace target to remove the ambiguity.", "maxLength": 80, "type": "string" }, "updated_at": { "description": "Date and time of the last resource type association modification", "format": "date-time", "readOnly": true, "type": "string" } }, "required": [ "name" ] }, "type": "array" }, "schema": { "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-tag-show-response.json0000664000175000017500000000113200000000000027175 0ustar00zuulzuul00000000000000{ "additionalProperties": false, "name": "tag", "properties": { "created_at": { "description": "Date and time of tag creation", "format": "date-time", "readOnly": true, "type": "string" }, "name": { "maxLength": 255, "type": "string" }, "updated_at": { "description": "Date and time of the last tag modification", "format": "date-time", "readOnly": true, "type": "string" } }, "required": [ "name" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-metadef-tags-list-response.json0000664000175000017500000000266200000000000027364 0ustar00zuulzuul00000000000000{ "links": [ { "href": "{first}", "rel": "first" }, { "href": "{next}", "rel": "next" }, { "href": "{schema}", "rel": "describedby" } ], "name": "tags", "properties": { "first": { "type": "string" }, "next": { "type": "string" }, "schema": { "type": "string" }, "tags": { "items": { "additionalProperties": false, "name": "tag", "properties": { "created_at": { "description": "Date and time of tag creation", "format": "date-time", "readOnly": true, "type": "string" }, "name": { "maxLength": 255, "type": "string" }, "updated_at": { "description": "Date and time of the last tag modification", "format": "date-time", "readOnly": true, "type": "string" } }, "required": [ "name" ] }, "type": "array" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-task-show-response.json0000664000175000017500000000403000000000000025761 0ustar00zuulzuul00000000000000{ "name": "task", "properties": { "created_at": { "description": "Datetime when this resource was created", "type": "string" }, "expires_at": { "description": "Datetime when this resource would be subject to removal", "type": [ "null", "string" ] }, "id": { "description": "An identifier for the task", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": "string" }, "input": { "description": "The parameters required by task, JSON blob", "type": [ "null", "object" ] }, "message": { "description": "Human-readable informative message only included when appropriate (usually on failure)", "type": "string" }, "owner": { "description": "An identifier for the owner of this task", "type": "string" }, "result": { "description": "The result of current task, JSON blob", "type": [ "null", "object" ] }, "schema": { "readOnly": true, "type": "string" }, "self": { "readOnly": true, "type": "string" }, "status": { "description": "The current status of this task", "enum": [ "pending", "processing", "success", "failure" ], "type": "string" }, "type": { "description": "The type of task represented by this content", "enum": [ "import" ], "type": "string" }, "updated_at": { "description": "Datetime when this resource was updated", "type": "string" } } } 
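The task schema above fixes the possible ``status`` values, which is exactly what a client needs for polling. A minimal polling sketch built on that enum — the service URL, token, and task ID are illustrative assumptions:

.. code-block:: python

    import json
    import time
    import urllib.request

    GLANCE = "http://glance.openstack.example.org"  # assumed endpoint
    TOKEN = "an-auth-token"                          # assumed token
    TASK_ID = "bb480de2-7077-4ea9-bbe9-be1891290d3e"

    def show_task():
        req = urllib.request.Request(
            f"{GLANCE}/v2/tasks/{TASK_ID}",
            headers={"X-Auth-Token": TOKEN})
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)

    task = show_task()
    # "pending" and "processing" are the only non-terminal states in
    # the schema's status enum, so poll until the task leaves them.
    while task["status"] in ("pending", "processing"):
        time.sleep(5)
        task = show_task()

    if task["status"] == "success":
        print("result:", task["result"])
    else:  # "failure": the schema says "message" should be non-empty
        print("failed:", task["message"])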
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/schemas-tasks-list-response.json0000664000175000017500000000462700000000000026153 0ustar00zuulzuul00000000000000{ "links": [ { "href": "{schema}", "rel": "describedby" } ], "name": "tasks", "properties": { "schema": { "type": "string" }, "tasks": { "items": { "name": "task", "properties": { "created_at": { "description": "Datetime when this resource was created", "type": "string" }, "expires_at": { "description": "Datetime when this resource would be subject to removal", "type": [ "null", "string" ] }, "id": { "description": "An identifier for the task", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "type": "string" }, "owner": { "description": "An identifier for the owner of this task", "type": "string" }, "schema": { "readOnly": true, "type": "string" }, "self": { "readOnly": true, "type": "string" }, "status": { "description": "The current status of this task", "enum": [ "pending", "processing", "success", "failure" ], "type": "string" }, "type": { "description": "The type of task represented by this content", "enum": [ "import" ], "type": "string" }, "updated_at": { "description": "Datetime when this resource was updated", "type": "string" } } }, "type": "array" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/stores-list-detail-response.json0000664000175000017500000000252000000000000026152 0ustar00zuulzuul00000000000000{ "stores": [ { "id":"reliable", "type": "rbd", "description": "More expensive store with data redundancy", "default": true, "weight": 100, "properties": { "pool": "pool1", "chunk_size": 65536, "thin_provisioning": false } }, { "id":"cheap", "type": "file", "description": "Less expensive store for seldom-used images", "weight": 200, "properties": { "datadir": "fdir", "chunk_size": 65536, "thin_provisioning": false } }, { "id":"fast", "type": "cinder", "description": "Reasonably-priced fast store", "weight": 300, "properties": { "volume_type": "volume1", "use_multipath": false } }, { "id":"slow", "type": "swift", "description": "Entry-level store balancing price and speed", "weight": 400, "properties": { "container": "container1", "large_object_size": 52428, "large_object_chunk_size": 204800 } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/stores-list-response.json0000664000175000017500000000111500000000000024711 0ustar00zuulzuul00000000000000{ "stores": [ { "id":"reliable", "description": "More expensive store with data redundancy" }, { "id":"fast", "description": "Provides quick access to your image data", "default": true }, { "id":"cheap", "description": "Less expensive store for seldom-used images" }, { "id":"special", "description": "Need a plausible description here that doesn't expose the store type", "read-only": true } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/task-create-request.json0000664000175000017500000000042600000000000024462 0ustar00zuulzuul00000000000000{ "type": "import", "input": { "import_from": "http://app-catalog.openstack.example.org/groovy-image", "import_from_format": "qcow2", "image_properties": { "disk_format": "vhd", 
"container_format": "ovf" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/task-create-response.json0000664000175000017500000000115500000000000024630 0ustar00zuulzuul00000000000000{ "created_at": "2016-06-24T14:57:19Z", "id": "bb480de2-7077-4ea9-bbe9-be1891290d3e", "input": { "image_properties": { "container_format": "ovf", "disk_format": "vhd" }, "import_from": "http://app-catalog.openstack.example.org/groovy-image", "import_from_format": "qcow2" }, "message": "", "owner": "fa6c8c1600f4444281658a23ee6da8e8", "result": null, "schema": "/v2/schemas/task", "self": "/v2/tasks/bb480de2-7077-4ea9-bbe9-be1891290d3e", "status": "pending", "type": "import", "updated_at": "2016-06-24T14:57:19Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/task-show-failure-response.json0000664000175000017500000000127000000000000025770 0ustar00zuulzuul00000000000000{ "created_at": "2016-06-24T14:57:20Z", "expires_at": "2016-06-26T14:57:20Z", "id": "bb480de2-7077-4ea9-bbe9-be1891290d3e", "input": { "image_properties": { "container_format": "ovf", "disk_format": "vhd" }, "import_from": "http://app-catalog.openstack.example.org/groovy-image", "import_from_format": "qcow2" }, "message": "Task failed due to Internal Error", "owner": "fa6c8c1600f4444281658a23ee6da8e8", "result": null, "schema": "/v2/schemas/task", "self": "/v2/tasks/bb480de2-7077-4ea9-bbe9-be1891290d3e", "status": "failure", "type": "import", "updated_at": "2016-06-24T14:57:20Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/task-show-processing-response.json0000664000175000017500000000111500000000000026513 0ustar00zuulzuul00000000000000{ "created_at": "2016-06-24T14:40:19Z", "id": "231c311d-3557-4e23-afc4-6d98af1419e7", "input": { "image_properties": { "container_format": "ovf", "disk_format": "vhd" }, "import_from": "http://example.com", "import_from_format": "qcow2" }, "message": "", "owner": "fa6c8c1600f4444281658a23ee6da8e8", "result": null, "schema": "/v2/schemas/task", "self": "/v2/tasks/231c311d-3557-4e23-afc4-6d98af1419e7", "status": "processing", "type": "import", "updated_at": "2016-06-24T14:40:20Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/task-show-success-response.json0000664000175000017500000000131200000000000026006 0ustar00zuulzuul00000000000000{ "created_at": "2016-06-29T16:13:07Z", "expires_at": "2016-07-01T16:13:07Z", "id": "805f47d2-8814-4cd7-bef3-37037389a998", "input": { "image_properties": { "container_format": "ovf", "disk_format": "vhd" }, "import_from": "https://apps.openstack.org/excellent-image", "import_from_format": "qcow2" }, "message": "", "owner": "02a7fb2dd4ef434c8a628c511dcbbeb6", "result": { "image_id": "2b61ed2b-f800-4da0-99ff-396b742b8646" }, "schema": "/v2/schemas/task", "self": "/v2/tasks/805f47d2-8814-4cd7-bef3-37037389a998", "status": "success", "type": "import", "updated_at": "2016-06-29T16:13:07Z" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/tasks-list-response.json0000664000175000017500000000161700000000000024526 0ustar00zuulzuul00000000000000{ "first": "/v2/tasks", "schema": 
"/v2/schemas/tasks", "tasks": [ { "created_at": "2016-06-24T14:44:19Z", "id": "08b7e1c8-3821-4f54-b3b8-d6655d178cdf", "owner": "fa6c8c1600f4444281658a23ee6da8e8", "schema": "/v2/schemas/task", "self": "/v2/tasks/08b7e1c8-3821-4f54-b3b8-d6655d178cdf", "status": "processing", "type": "import", "updated_at": "2016-06-24T14:44:19Z" }, { "created_at": "2016-06-24T14:40:19Z", "id": "231c311d-3557-4e23-afc4-6d98af1419e7", "owner": "fa6c8c1600f4444281658a23ee6da8e8", "schema": "/v2/schemas/task", "self": "/v2/tasks/231c311d-3557-4e23-afc4-6d98af1419e7", "status": "processing", "type": "import", "updated_at": "2016-06-24T14:40:20Z" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/samples/usage-response.json0000664000175000017500000000060700000000000023532 0ustar00zuulzuul00000000000000{ "usage": { "image_size_total": { "limit": 1024, "usage": 256 }, "image_count_total": { "limit": 10, "usage": 2 }, "image_stage_total": { "limit": 512, "usage": 0 }, "image_count_uploading": { "limit": 2, "usage": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/stores.inc0000664000175000017500000000114200000000000020240 0ustar00zuulzuul00000000000000.. -*- rst -*- Stores ****** Multi-store backend support allows for storing copies of an image in multiple places. Delete image from store ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/stores/{store_id}/{image_id} This API allows you to delete a copy of the image from a specific store. *(Since Image API v2.10)* .. note:: * This API will not allow deletion of the last location for an image. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409 Request ------- .. rest_parameters:: images-parameters.yaml - store_id: store-in-path - image_id: image_id-in-path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/tasks-parameters.yaml0000664000175000017500000001174700000000000022414 0ustar00zuulzuul00000000000000# variables in header Content-Type-json: description: | The media type descriptor for the request body. Use ``application/json``. in: header required: true type: string # variables in path task_id: description: | The identifier for the task, a UUID. in: path required: true type: string # variables in query limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string sort_dir: description: | Sorts the response by a set of one or more sort direction and attribute (``sort_key``) combinations. A valid value for the sort direction is ``asc`` (ascending) or ``desc`` (descending). If you omit the sort direction in a set, the default is ``desc``. in: query required: false type: string sort_key: description: | Sorts the response by one of the following attributes: ``created_at``, ``expires_at``, ``status``, ``type``, ``updated_at``. 
Default is ``created_at``. in: query required: false type: string status-in-query: description: | Filters the response by a task status. A valid value is ``pending``, ``processing``, ``success``, or ``failure``. in: query required: false type: string type-in-query: description: | Filters the response by a task type. A valid value is ``import``. in: query required: false type: string # variables in body created_at: description: | The date and time when the task was created. The date and time stamp format is `ISO 8601 `_. in: body required: true type: string expires_at: description: | The date and time when the task is subject to removal. While the *task object* (that is, the record describing the task) is subject to deletion, the result of the task (for example, an imported image) still exists. The date and time stamp format is `ISO 8601 `_. This value is only set when the task reaches status ``success`` or ``failure``. Otherwise its value is ``null``. It may not appear in the response when its value is ``null``. in: body required: true type: string first: description: | The URI for the first page of the response. in: body required: true type: string id: description: | The UUID of the task. in: body required: true type: string input: description: | A JSON object specifying the input parameters to the task. Consult your cloud provider's documentation for details. in: body required: true type: object message: description: | Human-readable text, possibly an empty string, usually displayed in an error situation to provide more information about what has occurred. in: body required: true type: string next: description: | The URI for the next page of the response. Will not be present on the last page of the response. in: body required: true type: string owner: description: | An identifier for the owner of the task, usually the tenant ID. in: body required: true type: string result: description: | A JSON object specifying information about the ultimate outcome of the task. Consult your cloud provider's documentation for details. in: body required: true type: object schema-task: description: | The URI for the schema describing an image task. in: body required: true type: string schema-tasks: description: | The URI for the schema describing an image task list. in: body required: true type: string self: description: | A URI for this task. in: body required: true type: string status: description: | The current status of this task. The value can be ``pending``, ``processing``, ``success`` or ``failure``. in: body required: true type: string tasks: description: | A list of sparse *task* objects. Each object contains the following fields: - ``created_at`` - ``id`` - ``owner`` - ``schema`` - ``self`` - ``status`` - ``type`` - ``updated_at`` in: body required: true type: array type: description: | The type of task represented by this content. in: body required: true type: string updated_at: description: | The date and time when the task was updated. The date and time stamp format is `ISO 8601 `_. If the ``updated_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/tasks-schemas.inc0000664000175000017500000000250100000000000021470 0ustar00zuulzuul00000000000000.. -*- rst -*- Task Schemas ************ Gets a JSON-schema document that represents an individual task and a list of tasks. ..
_tasks-schema: Show tasks schema ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/tasks *(Since Images v2.2)* Shows a JSON schema document that represents a list of *tasks*. A tasks list entity is a container of entities containing abbreviated information about individual tasks. The following schema is solely an example. Consider only the response to the API call as authoritative. Normal response codes: 200 Error response codes: 401 Request ------- This operation has no request parameters and does not accept a request body. Response Example ---------------- .. literalinclude:: samples/schemas-tasks-list-response.json :language: json .. _task-schema: Show task schema ~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/schemas/task *(Since Images v2.2)* Shows a JSON schema document that represents a *task* entity. The following schema is solely an example. Consider only the response to the API call as authoritative. Normal response codes: 200 Error response codes: 401 Request ------- This operation has no request parameters and does not accept a request body. Response Example ---------------- .. literalinclude:: samples/schemas-task-show-response.json :language: json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/v2/tasks.inc0000664000175000017500000000672600000000000020053 0ustar00zuulzuul00000000000000.. -*- rst -*- Tasks ***** Creates, lists, and shows details for tasks. *(Since API v2.2)* General Information ~~~~~~~~~~~~~~~~~~~ **API Status** This API was made admin-only by default in the OpenStack Mitaka release. Thus the following calls may not be available to end users in your cloud. Please consult your cloud provider's documentation for more information. **Conceptual Overview** Please see the `Tasks `_ section of the Glance Developers Documentation for a conceptual overview of tasks. **Task Status** The possible status values for tasks are presented in the following table. .. list-table:: :header-rows: 1 * - Status - Description * - pending - The task is waiting for execution. * - processing - Execution of the task is underway. * - success - The task completed successfully. The ``result`` element should be populated. * - failure - The task failed to complete. The ``message`` element should be a non-empty string. Create task ~~~~~~~~~~~ .. rest_method:: POST /v2/tasks Creates a task. Normal response codes: 201 Error response codes: 401, 413, 415 Request ------- .. rest_parameters:: tasks-parameters.yaml - type: type - input: input Request Example --------------- .. literalinclude:: samples/task-create-request.json :language: json Response Parameters ------------------- .. rest_parameters:: tasks-parameters.yaml - created_at: created_at - id: id - input: input - message: message - owner: owner - result: result - schema: schema-task - self: self - status: status - type: type - updated_at: updated_at Response Example ---------------- .. literalinclude:: samples/task-create-response.json :language: json List tasks ~~~~~~~~~~ .. rest_method:: GET /v2/tasks Lists tasks. Normal response codes: 200 Error response codes: 403, 404, 413 Request ------- .. rest_parameters:: tasks-parameters.yaml - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - status: status-in-query - type: type-in-query Response Parameters ------------------- .. rest_parameters:: tasks-parameters.yaml - first: first - next: next - schema: schema-tasks - tasks: tasks Response Example ---------------- ..
literalinclude:: samples/tasks-list-response.json :language: json Show task details ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/tasks/{task_id} Shows details for a task. Normal response codes: 200 Error response codes: 404 Request ------- .. rest_parameters:: tasks-parameters.yaml - task_id: task_id Response Parameters ------------------- .. rest_parameters:: tasks-parameters.yaml - created_at: created_at - expires_at: expires_at - id: id - input: input - message: message - owner: owner - result: result - schema: schema-task - self: self - status: status - type: type - updated_at: updated_at Response Example (task status: processing) ------------------------------------------ .. literalinclude:: samples/task-show-processing-response.json :language: json Response Example (task status: success) ------------------------------------------ .. literalinclude:: samples/task-show-success-response.json :language: json Response Example (task status: failure) --------------------------------------- .. literalinclude:: samples/task-show-failure-response.json :language: json ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8102975 glance-29.0.0/api-ref/source/versions/0000775000175000017500000000000000000000000017551 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/versions/index.rst0000664000175000017500000000712500000000000021417 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. :tocdepth: 2 ====================== Image Service Versions ====================== .. rest_expand_all:: .. 
include:: versions.inc Version History *************** **Zed changes** - version 2.16 is CURRENT - version 2.15 is SUPPORTED - version 2.14 is SUPPORTED - version 2.13 is SUPPORTED **Yoga changes** - version 2.15 is CURRENT - version 2.14 is SUPPORTED - version 2.13 is SUPPORTED - version 2.12 is SUPPORTED **Wallaby changes** - version 2.12 is CURRENT - version 2.11 is SUPPORTED - version 2.10 is SUPPORTED **Victoria changes** - version 2.11 is CURRENT - version 2.10 is SUPPORTED - version 2.9 is SUPPORTED **Ussuri changes** - version 2.10 is CURRENT - version 2.9 is SUPPORTED - version 2.8 is SUPPORTED **Train changes** - version 2.9 is CURRENT - version 2.8 is SUPPORTED - version 2.7 is SUPPORTED **Rocky changes** - version 2.8 is EXPERIMENTAL - version 2.7 is CURRENT - version 1.1 is DELETED - version 1.0 is DELETED **Queens changes** - version 2.6 is CURRENT - version 2.5 is SUPPORTED **Pike changes** - version 2.6 is EXPERIMENTAL **Ocata changes** - version 2.5 is CURRENT - version 2.4 is SUPPORTED **Newton changes** - version 2.4 is CURRENT - version 2.3 is SUPPORTED - version 1.1 is DEPRECATED - version 1.0 is DEPRECATED **Kilo changes** - version 2.3 is CURRENT - version 1.1 is SUPPORTED **Havana changes** - version 2.2 is CURRENT - version 2.1 is SUPPORTED **Grizzly changes** - version 2.1 is CURRENT - version 2.0 is SUPPORTED **Folsom changes** - version 2.0 is CURRENT **Diablo changes** - version 1.1 is CURRENT - version 1.0 is SUPPORTED **Bexar changes** - version 1.0 is CURRENT What happened to the v1 API? **************************** The Image Service API version 1 was DEPRECATED in the OpenStack Newton release and removed during the development cycle for the Rocky release. The last OpenStack release containing the Image Service API v1 was the Queens release. The source files for the Image Service API Reference are contained in the OpenStack Glance source code repository. The files for the version 1 reference are no longer in the current development branch, but they may still be found in the stable branches in the repository. If you would like to consult the Image Service API version 1 Reference, you can check out a stable branch from the repository, build it locally, and use a web browser to read the generated HTML files. Building the API Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~ You'll need to have the following installed on your system: * python * git * tox Then: 1. Go to the Glance repository mirror on GitHub: https://github.com/openstack/glance 2. Clone the repository to your local system. 3. Check out the **stable/queens** branch of glance. 4. From the root directory, use tox to build the api-reference: ``tox -e api-ref`` 5. The HTML version of the Image Service API Reference will be located in the ``api-ref/build/html`` directory. Use your browser to open the ``index.html`` file in that directory and you'll be able to browse the API Reference.
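The build steps above are easy to script; here is a sketch that runs exactly the commands named in the instructions (the clone directory is an arbitrary choice for this example):

.. code-block:: python

    import subprocess

    # Steps 2-4 from the instructions above.
    subprocess.run(
        ["git", "clone", "https://github.com/openstack/glance", "glance"],
        check=True)
    subprocess.run(["git", "checkout", "stable/queens"], cwd="glance",
                   check=True)
    subprocess.run(["tox", "-e", "api-ref"], cwd="glance", check=True)
    # Per step 5, the rendered reference is then available at
    # glance/api-ref/build/html/index.html.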
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8102975 glance-29.0.0/api-ref/source/versions/samples/0000775000175000017500000000000000000000000021215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/versions/samples/image-versions-response.json0000664000175000017500000001054600000000000026702 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v2.16", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "CURRENT" }, { "id": "v2.15", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.14", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.13", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.12", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.11", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.10", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.9", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.8", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.7", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.6", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.5", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.4", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.3", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.2", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.1", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" }, { "id": "v2.0", "links": [ { "href": "http://glance.openstack.example.org/v2/", "rel": "self" } ], "status": "SUPPORTED" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/api-ref/source/versions/versions.inc0000664000175000017500000000143600000000000022120 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _versions-call: API versions ************ List API versions ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /versions Lists information about all Image service API versions supported by this deployment, including the URIs. Normal response codes: 200 Request ------- There are no request parameters. Response Example ---------------- .. literalinclude:: samples/image-versions-response.json :language: json List API versions ~~~~~~~~~~~~~~~~~ .. rest_method:: GET / Lists information about all Image service API versions supported by this deployment, including the URIs. Normal response codes: 300 Request ------- There are no request parameters. 
Response Example ---------------- .. literalinclude:: samples/image-versions-response.json :language: json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/bindep.txt0000664000175000017500000000241400000000000015061 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for install and tests; # see http://docs.openstack.org/infra/bindep/ for additional information. build-essential [platform:dpkg test] gcc [platform:rpm test] gettext [!platform:suse] gettext-runtime [platform:suse] libffi-dev [platform:dpkg] libffi-devel [platform:redhat] libffi48-devel [platform:suse] virtual/libffi [platform:gentoo] locales [platform:debian] mariadb [platform:rpm] mariadb-server [platform:redhat platform:debian] mariadb-devel [platform:redhat] libmariadb-dev-compat [platform:debian] libmysqlclient-dev [platform:ubuntu] libmysqlclient-devel [platform:suse] mysql-client [platform:dpkg !platform:debian] mysql-server [platform:dpkg !platform:debian] postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] postgresql-server-devel [platform:suse] # Provides pg_config libpython3-dev [platform:dpkg] python3-devel [platform:rpm] qemu [platform:dpkg devstack build-image-dib] qemu-utils [platform:dpkg devstack build-image-dib] qemu-img [platform:redhat] qemu-tools [platform:suse] # Provides qemu-img libpq-dev [platform:dpkg] libpcre3-dev [platform:dpkg] pcre-devel [platform:redhat] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8102975 glance-29.0.0/doc/0000775000175000017500000000000000000000000013623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/requirements.txt0000664000175000017500000000072400000000000017112 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
sphinx>=2.0.0,!=2.1.0 # BSD os-api-ref>=1.4.0 # Apache-2.0 openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinxcontrib-apidoc>=0.2.0 # BSD whereto>=0.3.0 # Apache-2.0 # needed for apidoc support xattr>=0.9.2;sys_platform!='win32' # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8102975 glance-29.0.0/doc/source/0000775000175000017500000000000000000000000015123 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8102975 glance-29.0.0/doc/source/_extra/0000775000175000017500000000000000000000000016405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/_extra/.htaccess0000664000175000017500000000637600000000000020217 0ustar00zuulzuul00000000000000# Documentation redirects # # NOTE(rosmaita): the web server is already doing a rewrite of the # pre-pike-url-format ^/developer/glance/(.*) to /glance/latest/$1 # so the only URLs we will see should be of the form /glance/release/whatever # Redirects for the new directory structure introduced by commit # 1c7f556d4f77d2dd7f282f2b41bdfb2abb6e5740 # to: admin RedirectMatch 301 ^/glance/([^/]+)/apache-httpd.html$ /glance/$1/admin/apache-httpd.html RedirectMatch 301 ^/glance/([^/]+)/authentication.html$ /glance/$1/admin/authentication.html RedirectMatch 301 ^/glance/([^/]+)/cache.html$ /glance/$1/admin/cache.html RedirectMatch 301 ^/glance/([^/]+)/controllingservers.html$ /glance/$1/admin/controllingservers.html RedirectMatch 301 ^/glance/([^/]+)/db-sqlalchemy-migrate.html$ /glance/$1/admin/db-sqlalchemy-migrate.html RedirectMatch 301 ^/glance/([^/]+)/db.html$ /glance/$1/admin/db.html RedirectMatch 301 ^/glance/([^/]+)/flows.html$ /glance/$1/admin/flows.html RedirectMatch 301 ^/glance/([^/]+)/notifications.html$ /glance/$1/admin/notifications.html RedirectMatch 301 ^/glance/([^/]+)/policies.html$ /glance/$1/admin/policies.html RedirectMatch 301 ^/glance/([^/]+)/property-protections.html$ /glance/$1/admin/property-protections.html RedirectMatch 301 ^/glance/([^/]+)/requirements.html$ /glance/$1/admin/requirements.html RedirectMatch 301 ^/glance/([^/]+)/rollingupgrades.html$ /glance/$1/admin/rollingupgrades.html RedirectMatch 301 ^/glance/([^/]+)/tasks.html$ /glance/$1/admin/tasks.html # to: configuration RedirectMatch 301 ^/glance/([^/]+)/configuring.html$ /glance/$1/configuration/configuring.html RedirectMatch 301 ^/glance/([^/]+)/sample-configuration.html$ /glance/$1/configuration/sample-configuration.html RedirectMatch 301 ^/glance/([^/]+)/opts/(.*) /glance/$1/configuration/$2 # to: contributor RedirectMatch 301 ^/glance/([^/]+)/architecture.html$ /glance/$1/contributor/architecture.html RedirectMatch 301 ^/glance/([^/]+)/database_architecture.html$ /glance/$1/contributor/database_architecture.html RedirectMatch 301 ^/glance/([^/]+)/database_migrations.html$ /glance/$1/contributor/database_migrations.html RedirectMatch 301 ^/glance/([^/]+)/domain_implementation.html$ /glance/$1/contributor/domain_implementation.html RedirectMatch 301 ^/glance/([^/]+)/domain_model.html$ /glance/$1/contributor/domain_model.html RedirectMatch 301 ^/glance/([^/]+)/contributing/(.*) /glance/$1/contributor/$2 # to: user RedirectMatch 301 ^/glance/([^/]+)/common-image-properties.html$ /glance/$1/user/common-image-properties.html RedirectMatch 301 ^/glance/([^/]+)/formats.html$ /glance/$1/user/formats.html 
RedirectMatch 301 ^/glance/([^/]+)/glanceapi.html$ /glance/$1/user/glanceapi.html RedirectMatch 301 ^/glance/([^/]+)/glanceclient.html$ /glance/$1/user/glanceclient.html RedirectMatch 301 ^/glance/([^/]+)/glancemetadefcatalogapi.html$ /glance/$1/user/glancemetadefcatalogapi.html RedirectMatch 301 ^/glance/([^/]+)/identifiers.html$ /glance/$1/user/identifiers.html RedirectMatch 301 ^/glance/([^/]+)/metadefs-concepts.html$ /glance/$1/user/metadefs-concepts.html RedirectMatch 301 ^/glance/([^/]+)/signature.html$ /glance/$1/user/signature.html RedirectMatch 301 ^/glance/([^/]+)/statuses.html$ /glance/$1/user/statuses.html # to: cli RedirectMatch 301 ^/glance/([^/]+)/man/(.*) /glance/$1/cli/$2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8102975 glance-29.0.0/doc/source/_static/0000775000175000017500000000000000000000000016551 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/_static/.placeholder0000664000175000017500000000000000000000000021022 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.814298 glance-29.0.0/doc/source/admin/0000775000175000017500000000000000000000000016213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/apache-httpd.rst0000664000175000017500000000610600000000000021312 0ustar00zuulzuul00000000000000======================= Running Glance in HTTPD ======================= In short, Glance will not operate properly if run without eventlet, and introducing another web server into the mix does not make things any better. Attempts to do so failed without ever attracting the interest or resources needed to fix the underlying issues. None of the models that deploy Glance as a bare WSGI app under some httpd are currently advised. Since the Pike release Glance has packaged a WSGI script entrypoint that enables you to run it with a real web server like Apache HTTPD or nginx. There are several patterns for deploying it this way, all of which fail in different ways. This doc describes three common ways of trying to deploy Glance with Apache HTTPD. .. warning:: As pointed out in the Pike and Queens release notes (see the "Known Issues" section of each), the Glance project team recommends that Glance be run in its normal standalone configuration, particularly in production environments. The full functionality of Glance is not available when Glance is deployed in the manner described in this document. In particular, the interoperable image import functionality does not work under such a configuration. See the release notes for details. uWSGI Server HTTP Mode ---------------------- This mode has never worked properly, nor has it ever been a development focus. The clearest guidance we can give is: just don't do it. .. _mod_proxy_uwsgi: mod_proxy_uwsgi ''''''''''''''' This has not been workable since Ussuri, as Glance supports only Python 3. In theory the same considerations apply as for mod_wsgi, but even without chunked encoding the code is still broken under uwsgi. mod_wsgi -------- This deployment method is not recommended for Glance. The mod_wsgi protocol does not support ``Transfer-Encoding: chunked``, which makes it unsuitable for use with Glance.
However, while you could theoretically deploy Glance using mod_wsgi, it will fail on any request that uses chunked transfer encoding. .. _uwsgi_glossary: Glossary -------- .. glossary:: uwsgi protocol The native protocol used by the uWSGI server. (The acronym is written in all lowercase on purpose.) https://uwsgi-docs.readthedocs.io/en/latest/Protocol.html uWSGI project A project that aims at developing a full stack for building hosting services. It produces software, the uWSGI server, that is exposed in Python code as a module named ``uwsgi``. https://uwsgi-docs.readthedocs.io/en/latest/index.html https://pypi.org/project/uWSGI/ https://github.com/unbit/uwsgi mod_wsgi An Apache 2 HTTP server module that supports the Python WSGI specification. https://modwsgi.readthedocs.io/en/develop/ mod_proxy_uwsgi An Apache 2 HTTP Server module that provides a uwsgi gateway for mod_proxy. It communicates with the uWSGI server using the uwsgi protocol. http://httpd.apache.org/docs/trunk/mod/mod_proxy_uwsgi.html WSGI Web Server Gateway Interface, a Python standard published as `PEP 3333`_. https://wsgi.readthedocs.io/en/latest/index.html .. _PEP 3333: https://www.python.org/dev/peps/pep-3333 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/authentication.rst0000664000175000017500000000730300000000000021767 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _authentication: Authentication With Keystone ============================ Glance may optionally be integrated with Keystone. Setting this up is relatively straightforward, as the Keystone distribution includes the necessary middleware. Once you have installed Keystone and edited your configuration files, newly created images will have their `owner` attribute set to the tenant of the authenticated user, and the `is_public` attribute will cause access to those images for which it is `false` to be restricted to only the owner, users with admin context, or tenants/users with whom the image has been shared. Configuring the Glance servers to use Keystone ---------------------------------------------- Keystone is integrated with Glance through the use of middleware. The default configuration file for the Glance API uses a single piece of middleware called ``unauthenticated-context``, which generates a request context containing blank authentication information. In order to configure Glance to use Keystone, the ``authtoken`` and ``context`` middlewares must be deployed in place of the ``unauthenticated-context`` middleware. The ``authtoken`` middleware performs the authentication token validation and retrieves actual user authentication information. It can be found in the Keystone distribution. Configuring Glance API to use Keystone -------------------------------------- Configuring Glance API to use Keystone is relatively straightforward.
The first step is to ensure that declarations for the two pieces of middleware exist in the ``glance-api-paste.ini``. Here is an example for ``authtoken``:: [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory auth_url = http://localhost:5000 project_domain_id = default project_name = service_admins user_domain_id = default username = glance_admin password = password1234 The actual values for these variables will need to be set depending on your situation. For more information, please refer to the Keystone `documentation`_ on the ``auth_token`` middleware. .. _`documentation`: https://docs.openstack.org/keystonemiddleware/latest/middlewarearchitecture.html#configuration In short: * The ``auth_url`` variable points to the Keystone service. This information is used by the middleware to actually query Keystone about the validity of the authentication tokens. * The auth credentials (``project_name``, ``project_domain_id``, ``user_domain_id``, ``username``, and ``password``) will be used to retrieve a service token. That token will be used to authorize user tokens behind the scenes. Finally, to actually enable using Keystone authentication, the application pipeline must be modified. By default, it looks like:: [pipeline:glance-api] pipeline = versionnegotiation unauthenticated-context apiv1app Your particular pipeline may vary depending on other options, such as the image cache. This must be changed by replacing ``unauthenticated-context`` with ``authtoken`` and ``context``:: [pipeline:glance-api] pipeline = versionnegotiation authtoken context apiv1app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/cache.rst0000664000175000017500000002122500000000000020012 0ustar00zuulzuul00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _image-cache: The Glance Image Cache ====================== The Glance API server may be configured to have an optional local image cache. A local image cache stores a copy of image files, essentially enabling multiple API servers to serve the same image file, resulting in an increase in scalability due to an increased number of endpoints serving an image file. This local image cache is transparent to the end user -- in other words, the end user doesn't know that the Glance API is streaming an image file from its local cache or from the actual backend storage system. Managing the Glance Image Cache ------------------------------- While image files are automatically placed in the image cache on successful requests to ``GET /images/<IMAGE_ID>``, the image cache is not automatically managed. Here, we describe the basics of how to manage the local image cache on Glance API servers and how to automate this cache management. Configuration options for the Image Cache ----------------------------------------- The Glance cache uses two files: one for configuring the server and another for the utilities.
The ``glance-api.conf`` is for the server and the ``glance-cache.conf`` is for the utilities. The following options appear in both configuration files. They need to have the same values in both files, otherwise the cache will potentially run into problems. - ``image_cache_dir`` This is the base directory where Glance stores the cache data. (Required to be set; it does not have a default.) - ``image_cache_sqlite_db`` Path to the SQLite database file that will be used for cache management. This is a relative path from the ``image_cache_dir`` directory. (Default: ``cache.db``) - ``image_cache_driver`` The driver used for cache management. (Default: ``sqlite``) - ``image_cache_max_size`` The cache size at which the glance-cache-pruner will remove the oldest images, reducing the total bytes until they are under this value. (Default: ``10 GB``) - ``image_cache_stall_time`` The amount of time an incomplete image may stay in the cache; after this, the incomplete image will be deleted. (Default: ``1 day``) The following values are specific to the ``glance-cache.conf`` and are only required for the prefetcher to run correctly. - ``filesystem_store_datadir`` This is used if you are using the filesystem store; it points to where the data is kept. - ``filesystem_store_datadirs`` This is used to point to multiple filesystem stores. Controlling the Growth of the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The image cache has a configurable maximum size (the ``image_cache_max_size`` configuration file option). The ``image_cache_max_size`` is an upper limit beyond which the pruner, if running, starts cleaning the image cache. However, when images are successfully returned from a call to ``GET /images/<IMAGE_ID>``, the image cache automatically writes the image file to its cache, regardless of whether the resulting write would make the image cache's size exceed the value of ``image_cache_max_size``. In order to keep the image cache at or below this maximum cache size, you need to run the ``glance-cache-pruner`` executable. The recommended practice is to use ``cron`` to fire ``glance-cache-pruner`` at a regular interval. Cleaning the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~ Over time, the image cache can accumulate image files that are either in a stalled or invalid state. Stalled image files are the result of an image cache write failing to complete. Invalid image files are the result of an image file not being written properly to disk. To remove these types of files, you run the ``glance-cache-cleaner`` executable. The recommended practice is to use ``cron`` to fire ``glance-cache-cleaner`` at a semi-regular interval. Controlling Image Cache using V2 API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the Yoga release, Glance added new API calls for managing cache-related operations. In Zed, Glance removed support for the ``cache_images`` periodic job, which was used to prefetch all queued images concurrently, logging the results of the fetch for each image. Instead, an image can be cached immediately once it is queued for caching. You can use the API calls below to control cache-related operations. To queue an image for immediate caching, you can use one of the following methods: * You can call ``PUT /cache/<IMAGE_ID>`` to queue the image with identifier ``<IMAGE_ID>`` for immediate caching. * Alternately, you can use the ``cache-queue`` command of the glance client to queue the image for immediate caching. $ glance cache-queue <IMAGE_ID> This will queue the image with identifier ``<IMAGE_ID>`` for immediate caching.
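As a sketch, the equivalent raw API call with ``curl`` might look like this (``$token``, ``$image_url``, and ``$image_id`` are assumed to hold a valid token, the Glance endpoint, and the image identifier, following the conventions used elsewhere in this guide)::

    $ curl -i -X PUT -H "X-Auth-Token: $token" $image_url/v2/cache/$image_id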
To find out which images are in the image cache, use one of the following methods: * You can call ``GET /cache`` to see a JSON-serialized list of mappings that show cached images, the number of cache hits on each image, the size of the image, and the times they were last accessed, as well as images which are queued for caching. * Alternately, you can use the ``cache-list`` command of the glance client. Example usage:: $ glance cache-list To delete images which are already cached or queued for caching, use one of the following methods: * You can call ``DELETE /cache/<IMAGE_ID>`` to remove the image file for the image with identifier ``<IMAGE_ID>`` from the cache or queued state. * Alternately, you can use the ``cache-delete`` command of the glance client. Example usage:: $ glance cache-delete <IMAGE_ID> * You can also call ``DELETE /cache`` with the header ``x-image-cache-clear-target`` to delete either only cached images, only queued images, or both. Possible values for the header are ``cache``, ``queue``, ``both``. * Alternately, you can use the ``cache-clear`` command of the glance client to delete only cached images, only queued images, or both. Example usage:: $ glance cache-clear (default target is ``both``) $ glance cache-clear --target cached $ glance cache-clear --target queued * In Glance, the image cache is local to each node; hence, cache operations must be performed on each node locally. If the OpenStack cloud is deployed with HA (3/5/7 controllers), it is necessary to specify the host address using the -H option when running cache-related operations. Example usage:: $ glance --host=<HOST> cache-list Finding Which Images are in the Image Cache with glance-cache-manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can find out which images are in the image cache using one of the following methods: * If the ``cachemanage`` middleware is enabled in the application pipeline, you may call ``GET /cached-images`` to see a JSON-serialized list of mappings that show cached images, the number of cache hits on each image, the size of the image, and the times they were last accessed. * Alternately, you can use the ``glance-cache-manage`` program. This program may be run from a different host than the host containing the image cache. Example usage:: $ glance-cache-manage --host=<HOST> list-cached * In Glance, the image cache is local to each node; hence, image cache management must be performed on each node locally. If the OpenStack cloud is deployed with HA (3/5/7 controllers), it is necessary to specify the host address using the -H option when running cache management commands. Example usage:: $ glance-cache-manage --host=<HOST> list-cached * You can issue the following call on \*nix systems (on the host that contains the image cache):: $ ls -lhR $IMAGE_CACHE_DIR where ``$IMAGE_CACHE_DIR`` is the value of the ``image_cache_dir`` configuration variable. Note that an image's cache hits are not shown using this method. Manually Removing Images from the Image Cache with glance-cache-manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the ``cachemanage`` middleware is enabled, you may call ``DELETE /cached-images/<IMAGE_ID>`` to remove the image file for the image with identifier ``<IMAGE_ID>`` from the cache. Alternately, you can use the ``glance-cache-manage`` program.
Example usage:: $ glance-cache-manage --host=<HOST> delete-cached-image <IMAGE_ID> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/controllingservers.rst0000664000175000017500000001767100000000000022715 0ustar00zuulzuul00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _controlling-servers: Controlling Glance Servers ========================== This section describes the ways to start, stop, and reload Glance's server programs. Starting a server ----------------- There are two ways to start a Glance server: * Manually calling the server program * Using the ``glance-control`` server daemon wrapper program We recommend using the second method. Manually starting the server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first way is to directly call the server program, passing in command-line options and a single argument for a ``paste.deploy`` configuration file to use when configuring the server application. .. note:: Glance ships with an ``etc/`` directory that contains sample ``paste.deploy`` configuration files that you can copy to a standard configuration directory and adapt for your own uses. Specifically, bind_host must be set properly. If you do `not` specify a configuration file on the command line, Glance will do its best to locate a configuration file in one of the following directories, stopping at the first config file it finds: * ``$CWD`` * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` The filename that is searched for depends on the server application name. So, if you are starting up the API server, ``glance-api.conf`` is searched for. If no configuration file is found, you will see an error, like:: $ glance-api ERROR: Unable to locate any configuration file.
Cannot load application glance-api Here is an example showing how you can manually start the ``glance-api`` server in a shell:: $ sudo glance-api --config-file glance-api.conf --debug & jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** 2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file: 2011-04-13 14:50:12 DEBUG [glance-api] /home/jsuh/glance-api.conf 2011-04-13 14:50:12 DEBUG [glance-api] ================================================ 2011-04-13 14:50:12 DEBUG [glance-api] bind_host 65.114.169.29 2011-04-13 14:50:12 DEBUG [glance-api] bind_port 9292 2011-04-13 14:50:12 DEBUG [glance-api] debug True 2011-04-13 14:50:12 DEBUG [glance-api] default_store file 2011-04-13 14:50:12 DEBUG [glance-api] filesystem_store_datadir /home/jsuh/images/ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** 2011-04-13 14:50:12 DEBUG [routes.middleware] Initialized with method overriding = True, and path info altering = True 2011-04-13 14:50:12 DEBUG [eventlet.wsgi.server] (21354) wsgi starting up on http://65.114.169.29:9292/ $ ps aux | grep glance root 20009 0.7 0.1 12744 9148 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-api glance-api.conf --debug jsuh 20017 0.0 0.0 3368 744 pts/1 S+ 12:47 0:00 grep glance Simply supply the configuration file as the parameter to the ``--config-file`` option (the ``etc/glance-api.conf`` sample configuration file was used in the above example) and then any other options you want to use. (``--debug`` was used above to show some of the debugging output that the server shows when starting up. Call the server program with ``--help`` to see all available options you can specify on the command line.) For more information on configuring the server via the ``paste.deploy`` configuration files, see the section entitled :ref:`Configuring Glance servers <configuring>`. Note that the server `daemonizes` itself by using the standard shell backgrounding indicator, ``&``, in the previous example. For most use cases, we recommend using the ``glance-control`` server daemon wrapper for daemonizing. See below for more details on daemonization with ``glance-control``. Using the ``glance-control`` program to start the server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The second way to start up a Glance server is to use the ``glance-control`` program. ``glance-control`` is a wrapper script that allows the user to start, stop, restart, and reload the other Glance server programs in a fashion that is more conducive to automation and scripting. Servers started via the ``glance-control`` program are always `daemonized`, meaning that the server program process runs in the background. To start a Glance server with ``glance-control``, simply call ``glance-control`` with a server and the word "start", followed by any command-line options you wish to provide. Start the server with ``glance-control`` in the following way:: $ sudo glance-control [OPTIONS] <SERVER> start [CONFPATH] .. note:: You must use the ``sudo`` program to run ``glance-control`` currently, as the pid files for the server programs are written to /var/run/glance/ Here is an example that shows how to start the ``glance-api`` server with the ``glance-control`` wrapper script. :: $ sudo glance-control api start glance-api.conf Starting glance-api with /home/jsuh/glance.conf $ ps aux | grep glance root 20038 4.0 0.1 12728 9116 ?
Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-api /home/jsuh/glance-api.conf jsuh 20042 0.0 0.0 3368 744 pts/1 S+ 12:51 0:00 grep glance The same configuration files are used by ``glance-control`` to start the Glance server programs, and you can specify (as the example above shows) a configuration file when starting the server. In order for your launched glance service to be monitored for unexpected death and respawned if necessary, use the following option:: $ sudo glance-control [service] start --respawn ... Note that this will cause ``glance-control`` itself to remain running. Also note that deliberately stopped services are not respawned, nor are rapidly bouncing services (where process death occurred within one second of the last launch). By default, output from glance services is discarded when launched with ``glance-control``. In order to capture such output via syslog, use the following option:: $ sudo glance-control --capture-output ... Stopping a server ----------------- If you started a Glance server manually and did not use the ``&`` backgrounding function, simply send a terminate signal to the server process by typing ``Ctrl-C``. If you started the Glance server using the ``glance-control`` program, you can use the ``glance-control`` program to stop it. Simply do the following:: $ sudo glance-control <SERVER> stop as this example shows:: $ sudo glance-control api stop Stopping glance-api pid: 17602 signal: 15 Restarting a server ------------------- You can restart a server with the ``glance-control`` program, as demonstrated here:: $ sudo glance-control api restart etc/glance-api.conf Stopping glance-api pid: 17611 signal: 15 Starting glance-api with /home/jpipes/repos/glance/trunk/etc/glance-api.conf Reloading a server ------------------ You can reload a server with the ``glance-control`` program, as demonstrated here:: $ sudo glance-control api reload Reloading glance-api (pid 18506) with signal(1) A reload sends a SIGHUP signal to the master process and causes new configuration settings to be picked up without any interruption to the running service (provided neither bind_host nor bind_port has changed). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/db-sqlalchemy-migrate.rst0000664000175000017500000000415700000000000023127 0ustar00zuulzuul00000000000000.. Copyright 2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _legacy-database-management: Legacy Database Management ========================== .. note:: This page applies only to Glance releases prior to Ocata. From Ocata onward, please see :ref:`database-management`. The default metadata driver for Glance uses SQLAlchemy, which implies there exists a backend database which must be managed. The ``glance-manage`` binary provides a set of commands for making this easier.
The commands should be executed as a subcommand of 'db':: glance-manage db <cmd> <args> Sync the Database ----------------- :: glance-manage db sync Place a database under migration control and upgrade, creating it first if necessary. Determining the Database Version -------------------------------- :: glance-manage db version This will print the current migration level of a Glance database. Upgrading an Existing Database ------------------------------ :: glance-manage db upgrade [VERSION] This will take an existing database and upgrade it to the specified VERSION. Downgrading an Existing Database -------------------------------- Upgrades involve complex operations and can fail. Before attempting any upgrade, you should make a full database backup of your production data. As of Kilo, database downgrades are not supported, and the only method available to get back to a prior database version is to restore from backup [1]. [1]: https://wiki.openstack.org/wiki/OpsGuide/Operational_Upgrades#perform-a-backup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/db.rst0000664000175000017500000002205700000000000017340 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _database-management: Database Management =================== Updating and Migrating the Database ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default metadata driver for Glance uses `SQLAlchemy`_, which implies there exists a backend database which must be managed. The ``glance-manage`` binary provides a set of commands for making this easier. The commands should be executed as a subcommand of 'db':: glance-manage db <cmd> <args> .. note:: In the Ocata release (14.0.0), the database migration engine was changed from *SQLAlchemy Migrate* to *Alembic*. This necessitated some changes in the ``glance-manage`` tool. While the user interface has been kept as similar as possible, the ``glance-manage`` tool included with the Ocata and more recent releases is incompatible with the "legacy" tool. If you are consulting these documents for information about the ``glance-manage`` tool in the Newton or earlier releases, please see the :ref:`legacy-database-management` page. .. _`SQLAlchemy`: http://www.sqlalchemy.org/ Migration Scripts ----------------- The migration scripts are stored in the directory: ``glance/db/sqlalchemy/alembic_migrations/versions`` As mentioned above, these scripts utilize the Alembic migration engine, which was first introduced in the Ocata release. All database migrations up through the Liberty release are consolidated into one Alembic migration script named ``liberty_initial``. Mitaka migrations are retained, but have been rewritten for Alembic and named using the new naming convention. A fresh Glance installation will apply the following migrations: * ``liberty-initial`` * ``mitaka01`` * ``mitaka02`` * ``ocata01``
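For example, on a fresh installation you might run the migrations and then confirm the resulting migration level like this (the output shown is illustrative for an Ocata-era database)::

    $ glance-manage db sync
    $ glance-manage db version
    ocata01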
note:: The "old-style" migration scripts have been retained in their `current directory`_ in the Ocata release so that interested operators can correlate them with the new migrations. This directory will be removed in future releases. In particular, the "old-style" script for the Ocata migration, `045_add_visibility.py`_ is retained for operators who are conversant in SQLAlchemy Migrate and are interested in comparing it with a "new-style" Alembic migration script. The Alembic script, which is the one actually used to do the upgrade to Ocata, is `ocata01_add_visibility_remove_is_public.py`_. .. _`current directory`: https://opendev.org/openstack/glance/src/branch/stable/ocata/glance/db/sqlalchemy/migrate_repo/versions .. _`045_add_visibility.py`: https://opendev.org/openstack/glance/src/branch/stable/ocata/glance/db/sqlalchemy/migrate_repo/versions/045_add_visibility.py .. _`ocata01_add_visibility_remove_is_public.py`: https://opendev.org/openstack/glance/src/branch/stable/ocata/glance/db/sqlalchemy/alembic_migrations/versions/ocata01_add_visibility_remove_is_public.py Sync the Database ----------------- :: glance-manage db sync [VERSION] Place an existing database under migration control and upgrade it to the specified VERSION or to the latest migration level if VERSION is not specified. .. note:: Prior to Ocata release the database version was a numeric value. For example: for the Newton release, the latest migration level was ``44``. Starting with Ocata, database version is a revision name corresponding to the latest migration included in the release. For the Ocata release, there is only one database migration and it is identified by revision ``ocata01``. So, the database version for Ocata release is ``ocata01``. This naming convention will change slightly with the introduction of zero-downtime upgrades, which is EXPERIMENTAL in Ocata, but is projected to be the official upgrade method beginning with the Pike release. See :ref:`zero-downtime` for more information. Determining the Database Version -------------------------------- :: glance-manage db version This will print the current migration level of a Glance database. Upgrading an Existing Database ------------------------------ :: glance-manage db upgrade [VERSION] This will take an existing database and upgrade it to the specified VERSION. .. _downgrades: Downgrading an Existing Database -------------------------------- Downgrading an existing database is **NOT SUPPORTED**. Upgrades involve complex operations and can fail. Before attempting any upgrade, you should make a full database backup of your production data. As of the OpenStack Kilo release (April 2013), database downgrades are not supported, and the only method available to get back to a prior database version is to restore from backup. Database Maintenance ~~~~~~~~~~~~~~~~~~~~ Like most OpenStack systems, Glance performs *soft* deletions when it deletes records from its database. Depending on usage patterns in your cloud, you may occasionally want to actually remove such soft deleted table rows. This operation is called *purging* the database, and you can use the ``glance-manage`` tool to do this. High-Level Database Architecture -------------------------------- Roughly, what we've got in the glance database is an **images** table that stores the image **id** and some other core image properties. 
All the other information about the image (for example: where the image data is stored in the backend, what projects an image has been shared with, image tags, custom image properties) is stored in other tables in which the **image id** is a foreign key. Because the **images** table keeps track of what image identifiers have been issued, it must be treated differently from the other tables with respect to purging the database. .. note:: Before the Rocky release (17.0.0), the **images** table was *not* treated differently, which made Glance vulnerable to `OSSN-0075 <https://wiki.openstack.org/wiki/OSSN/OSSN-0075>`_, "Deleted Glance image IDs may be reassigned". Please read through that OpenStack Security Note to understand the nature of the problem. Additionally, the Glance spec `Mitigate OSSN-0075 `_ contains a discussion of the issue and explains the changes made to the ``glance-manage`` tool for the Rocky release. The `Gerrit review of the spec `_ contains an extensive discussion of several alternative approaches and will give you an idea of why the Glance team provided a "mitigation" instead of a fix. Purging the Database -------------------- You can use the ``glance-manage`` tool to purge the soft-deleted rows from all tables *except* the images table:: glance-manage db purge This command takes two optional parameters: --age_in_days NUM Only purge rows that have been deleted for longer than *NUM* days. The default is 30 days. --max_rows NUM Purge a maximum of *NUM* rows from each table. The default is 100. All deleted rows are purged if this value equals -1. Purging the Images Table ------------------------ Remember that image identifiers are used by other OpenStack services that require access to images. These services expect that when an image is requested by ID, they will receive the same data every time. When the **images** table is purged of its soft-deleted rows, Glance loses its memory that those image IDs were ever mapped to some particular payload. Thus, care must be taken in purging the **images** table. We recommend that it be done much less frequently than the "regular" purge operation. Use the following command to purge the images table:: glance-manage db purge_images_table Be sure you have read and understood the implications of `OSSN-0075 <https://wiki.openstack.org/wiki/OSSN/OSSN-0075>`_ before you use this command, which purges the soft-deleted rows from the images table. It takes two optional parameters: --age_in_days NUM Only purge rows that have been deleted for longer than *NUM* days. The default is 180 days. --max_rows NUM Purge a maximum of *NUM* rows from the **images** table. The default is 100. All deleted rows are purged if this value equals -1. It is possible for this command to fail with an IntegrityError saying something like "Cannot delete or update a parent row: a foreign key constraint fails". This can happen when you try to purge records from the **images** table when related records have not yet been purged from other tables. The ``purge_images_table`` command should only be issued after all related information has been purged using the "regular" ``purge`` command. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/flows.rst0000664000175000017500000000147700000000000020104 0ustar00zuulzuul00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Glance Flow Plugins =================== Flows ----- .. list-plugins:: glance.flows :detailed: Import Flows ------------ .. list-plugins:: glance.flows.import :detailed: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/index.rst0000664000175000017500000000076400000000000020063 0ustar00zuulzuul00000000000000=========================== Glance Administration Guide =========================== .. toctree:: :maxdepth: 2 authentication cache policies property-protections apache-httpd notifications tasks controllingservers flows interoperable-image-import multistores db db-sqlalchemy-migrate zero-downtime-db-upgrade rollingupgrades troubleshooting manage-images useful-image-properties requirements quotas os_hash_algo new-location-apis ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/interoperable-image-import.rst0000664000175000017500000007555500000000000024205 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _iir: Interoperable Image Import ========================== Version 2.6 of the Image Service API introduces new API calls that implement an interoperable image import process. These API calls, and the workflow for using them, are described in the `Interoperable Image Import`_ section of the `Image Service API reference`_. That documentation explains the end user's view of interoperable image import. In this section, we discuss what configuration options are available to operators. The interoperable image import process uses Glance tasks, but does *not* require that the Tasks API be exposed to end users. Further, it requires the **taskflow** task executor. The following configuration options must be set: * in the ``[task]`` option group: * ``task_executor`` must either be set to **taskflow** or left at its default value * in the ``[taskflow_executor]`` options group: * The default values are fine. It's a good idea to read through the descriptions in the sample **glance-api.conf** file to see what options are available. .. note:: You can find an example glance-api.conf_ file in the **etc/** subdirectory of the Glance source code tree. Make sure that you are looking in the correct branch for the OpenStack release you are working with. * in the default options group: * ``node_staging_uri`` as a ``file:///path/to/dir`` URI (in the single-store case) or ``[os_glance_staging_store]/filesystem_store_datadir`` as a path (in the multi-store case) must specify a location writable by the glance user.
See `Staging Directory Configuration`_ for more details and recommendations. * ``enabled_import_methods`` must specify the import methods you are exposing at your installation. The default value for this setting is ``['glance-direct','web-download']``. See the next section for a description of these import methods. Additionally, your policies must be such that an ordinary end user can manipulate tasks. In releases prior to Pike, we recommended that the task-related policies be admin-only so that end users could not access the Tasks API. In Pike, a new policy was introduced that controls access to the Tasks API. It is therefore now possible to keep the individual task policies unrestricted while not exposing the Tasks API to end users. Thus, the following is the recommended configuration for the task-related policies: .. code-block:: ini "get_task": "", "get_tasks": "", "add_task": "", "modify_task": "", "tasks_api_access": "role:admin", Image Import Methods -------------------- Glance provides four import methods that you can make available to your users: ``glance-direct``, ``web-download``, ``glance-download``, and ``copy-image``. By default, ``glance-download`` is not enabled. * The ``glance-direct`` import method allows your users to upload image data directly to Glance. * The ``web-download`` method allows an end user to import an image from a remote URL. The image data is retrieved from the URL and stored in the Glance backend. (In other words, this is a *copy-from* operation.) .. note:: The ``web-download`` import method replaces the copy-from functionality that was available in the Image API v1 but previously absent from v2. Additionally, the Image API v1 was removed in Glance 17.0.0 (Rocky). * The ``glance-download`` method allows an end user to import an image from a remote Glance. This import method is used to import an image from another OpenStack region that is federated by the same Keystone. * The ``copy-image`` method allows an end user to copy an existing image to other Glance backends available in the deployment. This import method is only used if multiple Glance backends are enabled in your deployment. You control which methods are available to API users via the ``enabled_import_methods`` configuration option in the default section of the **glance-api.conf** file. Staging Directory Configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All of the import methods require a staging directory to be configured. This is essentially a temporary scratch location where the image can be staged (by the user via ``glance-direct``), downloaded (by ``web-download``), or pulled from an existing store (as in ``copy-image``) before being copied to a given store location. In the single-store case, this location is specified by a local filesystem URI in the ``node_staging_uri`` configuration option, like this: .. code-block:: ini [DEFAULT] node_staging_uri = file:///var/lib/glance/staging In the multistore case, as described in :ref:`reserved_stores`, the staging store should be configured with the path: .. code-block:: ini [os_glance_staging_store] filesystem_store_datadir = /var/lib/glance/staging The staging directory for each worker must be configured for all import methods, and can be either local (recommended) or shared. In the case of a shared location, all Glance API workers will be dependent on the shared storage availability, will compete for IO resources, and may introduce additional network traffic.
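If you do choose a shared staging location, each worker simply mounts the same filesystem at the configured path; as a minimal sketch (the NFS server and export shown are hypothetical), an ``/etc/fstab`` entry on each worker might look like::

    nfs01.example.com:/export/glance-staging  /var/lib/glance/staging  nfs  defaults  0  0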
If `local` storage is chosen, you must configure each worker with the URL by which the other workers can reach it directly. This allows one worker behind a load balancer to stage an image in one request, and another worker to handle the subsequent import request. As an example: .. code-block:: ini [DEFAULT] worker_self_reference_url = https://glance01.example.com:8000 This assumes you have several glance-api workers named ``glance01``, ``glance02``, etc. behind your load balancer. Note that ``public_endpoint`` will be used as the default if ``worker_self_reference_url`` is not set. As this will generally be set to the same value across all workers, the result is that all workers will assume the same identity and thus revert to shared-staging behavior. If ``public_endpoint`` is set differently for one or a group of workers, they will be considered isolated and thus not sharing staging storage. Configuring the glance-direct method ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For the ``glance-direct`` method, make sure that ``glance-direct`` is included in the list specified by your ``enabled_import_methods`` setting, and that staging directory config options are set properly. Note that in order to use ``glance-direct``, the ``worker_self_reference_url`` configuration option must be set as above, or all Glance API workers must have their staging directory mounted to a common location (such as an NFS server). Configuring the web-download method ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the ``web-download`` import method, make sure that it is included in the list of methods in the ``enabled_import_methods`` option, and that staging directory config options are set properly. Additionally, you have the following configuration available. Depending on the nature of your cloud and the sophistication of your users, you may wish to restrict what URIs they may use for the web-download import method. .. note:: You should be aware of OSSN-0078_, "copy_from in Image Service API v1 allows network port scan". The v1 copy_from feature does not have the configurability described here. You can do this by configuring options in the ``[import_filtering_opts]`` section of the **glance-image-import.conf** file. .. note:: The **glance-image-import.conf** is an optional file. (See below for a discussion of the default settings if you don't include this file.) You can find an example file named glance-image-import.conf.sample_ in the **etc/** subdirectory of the Glance source code tree. Make sure that you are looking in the correct branch for the OpenStack release you are working with. You can whitelist ("allow *only* these") or blacklist ("do *not* allow these") at three levels: * scheme (``allowed_schemes``, ``disallowed_schemes``) * host (``allowed_hosts``, ``disallowed_hosts``) * port (``allowed_ports``, ``disallowed_ports``) There are six configuration options, but the way it works is that if you specify both at any level, the whitelist is honored and the blacklist is ignored. (So why have both? Well, you may want to whitelist a scheme, but blacklist a host, and whitelist a particular port.) Validation of a URI happens as follows: 1. The scheme is checked. a. missing scheme: reject b. If there's a whitelist, and the scheme is not in it: reject. Otherwise, skip c and continue on to 2. c. If there's a blacklist, and the scheme is in it: reject. 2. The hostname is checked. a. missing hostname: reject b. If there's a whitelist, and the host is not in it: reject. Otherwise, skip c and continue on to 3. c.
If there's a blacklist, and the host is in it: reject. 3. If there's a port in the URI, the port is checked. a. If there's a whitelist, and the port is not in it: reject. Otherwise, skip b and continue on to 4. b. If there's a blacklist, and the port is in it: reject. 4. The URI is accepted as valid. Note that if you allow a scheme, either by whitelisting it or by not blacklisting it, any URI that uses the default port for that scheme by not including a port in the URI is allowed. If it does include a port in the URI, the URI will be validated according to the above rules. Default settings ++++++++++++++++ The **glance-image-import.conf** is an optional file. Here are the default settings for these options: * ``allowed_schemes`` - ``['http', 'https']`` * ``disallowed_schemes`` - empty list * ``allowed_hosts`` - empty list * ``disallowed_hosts`` - empty list * ``allowed_ports`` - ``[80, 443]`` * ``disallowed_ports`` - empty list Thus if you use the defaults, end users will only be able to access URIs using the http or https scheme. The only ports users will be able to specify are 80 and 443. (Users do not have to specify a port, but if they do, it must be either 80 or 443.) .. note:: The **glance-image-import.conf** is an optional file. You can find an example file named glance-image-import.conf.sample_ in the **etc/** subdirectory of the Glance source code tree. Make sure that you are looking in the correct branch for the OpenStack release you are working with. Configuring the glance-download method ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the ``glance-download`` import method, make sure that it is included in the list of methods in the ``enabled_import_methods`` option, and that staging directory config options are set properly. Additionally, you have the following configuration available. Depending on your needs, you may configure additional image properties to be copied from the remote image to the local image. You can do this by configuring options in the ``[glance_download_opts]`` section of the **glance-image-import.conf** file. The ``extra_properties`` option is a list of properties that should be copied from the remote image. The listed properties should be read as "starts with" prefixes; this allows you to specify a namespace instead of explicitly listing each property in it. Default values are: ``['hw_', 'trait:', 'os_distro', 'os_secure_boot', 'os_type']`` If you set this option, the default values are ignored entirely unless you explicitly include them. .. note:: The ``extra_properties`` option will ignore namespaces reserved by Glance, meaning that all the properties starting with ``os_glance`` won't be set on the local image. .. note:: The **glance-image-import.conf** is an optional file. You can find an example file named glance-image-import.conf.sample in the **etc/** subdirectory of the Glance source code tree. Make sure that you are looking in the correct branch for the OpenStack release you are working with. Configuring the copy-image method ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For the ``copy-image`` method, make sure that ``copy-image`` is included in the list specified by your ``enabled_import_methods`` setting, and that you have multiple Glance backends configured in your environment. To allow the copy-image operation to be performed on images that a user does not own, you can set the `copy_image` policy to something other than the default, for example:: "copy_image": "'public':%(visibility)s"
.. _iir_plugins: Copying an existing image into multiple stores ----------------------------------------------- Starting with the Ussuri release, it is possible to copy existing image data into multiple stores using the interoperable image import workflow. By default, a user is able to copy only images that they own. Unless copying of unowned images is allowed by the cloud operator through the policy check, the user will get a Forbidden (operation not permitted) response for such copy operations. Even if copying of unowned images is allowed by policy, ownership of the image remains unchanged. An operator or end user can copy an existing image either by specifying ``all_stores`` as True in the request body or by passing a list of desired stores in the request body. If ``all_stores`` is specified and the image data is already present in some of the available stores, those stores are silently excluded from the list of all configured stores; whereas if ``all_stores`` is False and ``stores`` are specified explicitly in the request body, the request is rejected if the image data is already present in any of the specified stores. If ``all_stores`` is specified in the request body and the cloud operator has also configured a read-only ``http`` store, that store is explicitly excluded. The image is copied to the staging area from one of its available locations, and import processing then continues using the import workflow, as explained in the ``Importing in multiple stores`` section below. Importing in multiple stores ---------------------------- Starting with Ussuri, it is possible to import data into multiple stores using the interoperable image import workflow. The status of the image is set to ``active`` according to the value of the ``all_stores_must_succeed`` parameter. * If set to False: the image will be available as soon as an import to one store has succeeded. * If set to True (default): the status is set to ``active`` only when all stores have been successfully treated. Check progress ~~~~~~~~~~~~~~ As each store is treated sequentially, it can take quite some time for the workflow to complete depending on the size of the image and the number of stores to import data to. It is possible to follow task progress by looking at 2 reserved image properties: * ``os_glance_importing_to_stores``: This property contains a list of stores that have not yet been processed. At the beginning of the import flow, it is filled with the stores provided in the request. Each time a store is fully handled, it is removed from the list. * ``os_glance_failed_import``: Each time an import in a store fails, it is added to this list. This property is emptied at the beginning of the import flow. These 2 properties are also available in the notifications sent during the workflow: .. note:: Example An operator calls the image import API with the following parameters:: curl -i -X POST -H "X-Auth-Token: $token" -H "Content-Type: application/json" -d '{"method": {"name":"glance-direct"}, "stores": ["ceph1", "ceph2"], "all_stores_must_succeed": false}' $image_url/v2/images/{image_id}/import The upload fails for 'ceph2' but succeeds on 'ceph1'. Since the parameter ``all_stores_must_succeed`` has been set to 'false', the task ends successfully and the image is now active.
Notifications sent by glance look like (payload is truncated for
clarity)::

    {
        "priority": "INFO",
        "event_type": "image.prepare",
        "timestamp": "2019-08-27 16:10:30.066867",
        "payload": {"status": "importing",
                    "name": "example",
                    "backend": "ceph1",
                    "os_glance_importing_to_stores": ["ceph1", "ceph2"],
                    "os_glance_failed_import": [], ...},
        "message_id": "1c8993ad-e47c-4af7-9f75-fa49596eeb10", ...
    }

    {
        "priority": "INFO",
        "event_type": "image.upload",
        "timestamp": "2019-08-27 16:10:32.058812",
        "payload": {"status": "active",
                    "name": "example",
                    "backend": "ceph1",
                    "os_glance_importing_to_stores": ["ceph2"],
                    "os_glance_failed_import": [], ...},
        "message_id": "8b8993ad-e47c-4af7-9f75-fa49596eeb11", ...
    }

    {
        "priority": "INFO",
        "event_type": "image.prepare",
        "timestamp": "2019-08-27 16:10:33.066867",
        "payload": {"status": "active",
                    "name": "example",
                    "backend": "ceph2",
                    "os_glance_importing_to_stores": ["ceph2"],
                    "os_glance_failed_import": [], ...},
        "message_id": "1c8993ad-e47c-4af7-9f75-fa49596eeb18", ...
    }

    {
        "priority": "ERROR",
        "event_type": "image.upload",
        "timestamp": "2019-08-27 16:10:34.058812",
        "payload": "Error Message",
        "message_id": "8b8993ad-e47c-4af7-9f75-fa49596eeb11", ...
    }

Customizing the image import process
------------------------------------

When a user issues the image-import call, Glance retrieves the staged
image data, processes it, and saves the result in the backing store.  You
can customize the nature of this processing by using *plugins*.  Some
plugins are provided by the Glance project team, you can use third-party
plugins, or you can write your own.

Technical information
~~~~~~~~~~~~~~~~~~~~~

The import step of interoperable image import is performed by a
`Taskflow`_ "flow" object.  This object, provided by Glance, will call any
plugins you have specified in the ``glance-image-import.conf`` file.  The
plugins are loaded by `Stevedore`_ and must be listed in the entry point
registry in the namespace ``glance.image_import.plugins``.  (If you are
using only plugins provided by the Glance project team, these are already
registered for you.)

A plugin must be written in Python as a `Taskflow "Task" object`_.  The
file containing this object must be present in the
``glance/async_/flows/plugins`` directory.  The plugin file must contain a
``get_flow`` function that returns a Taskflow Task object wrapped in a
linear flow.  See the ``no_op`` plugin, located at
``glance/async_/flows/plugins/no_op.py`` for an example of how to do this.
(A minimal skeleton is also sketched below.)

Specifying the plugins to be used
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

First, the plugin code must exist in the directory
``glance/async_/flows/plugins``.  The name of a plugin is the filename
(without extension) of the file containing the plugin code.  For example,
a file named ``fred_mertz.py`` would contain the plugin ``fred_mertz``.

Second, the plugin must be listed in the entry point list for the
``glance.image_import.plugins`` namespace.  (If you are using only plugins
provided with Glance, this will have already been done for you, but it
never hurts to check.)  The entry point list is in ``setup.cfg``.  Find
the section with the heading ``[entry_points]`` and look for the line
beginning with ``glance.image_import.plugins =``.  It will be followed by
a series of lines of the form::

    <plugin-name> = <module-path>:get_flow

For example::

    no_op = glance.async_.flows.plugins.no_op:get_flow

Make sure any plugin you want to use is included here.

Third, the plugin must be listed in the ``glance-image-import.conf`` file
as one of the plugin names in the list providing the value for the
``image_import_plugins`` option.  Plugins are executed in the order they
are specified in this list.
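For orientation, here is a minimal sketch of what such a plugin might look
like, modeled on the bundled ``no_op`` plugin.  The plugin name
``hello_world`` and the logged message are hypothetical, and the kwargs
shown are an assumption based on what ``no_op`` consumes:

.. code-block:: python

   # glance/async_/flows/plugins/hello_world.py  (hypothetical plugin)
   from oslo_config import cfg
   from oslo_log import log as logging
   from taskflow.patterns import linear_flow as lf
   from taskflow import task

   CONF = cfg.CONF
   LOG = logging.getLogger(__name__)


   class _HelloWorld(task.Task):
       """A Task that runs during import processing and just logs."""

       def __init__(self, task_id, task_type, image_repo):
           self.task_id = task_id
           self.task_type = task_type
           self.image_repo = image_repo
           super(_HelloWorld, self).__init__(
               name='%s-HelloWorld-%s' % (task_type, task_id))

       def execute(self, **kwargs):
           # A real plugin would inspect or modify the staged image here.
           LOG.info("Hello from the image import flow (task %s)",
                    self.task_id)


   def get_flow(**kwargs):
       """Return the plugin's Task wrapped in a linear flow.

       Glance passes context (for example task_id, task_type and
       image_repo) in kwargs, as the no_op plugin demonstrates.
       """
       task_id = kwargs.get('task_id')
       task_type = kwargs.get('task_type')
       image_repo = kwargs.get('image_repo')
       return lf.Flow(task_type).add(
           _HelloWorld(task_id, task_type, image_repo))

Such a plugin would then need an entry point registration along the lines
of ``hello_world = glance.async_.flows.plugins.hello_world:get_flow``.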
The Image Property Injection Plugin
-----------------------------------

.. list-table::

   * - release introduced
     - Queens (Glance 16.0.0)
   * - configuration file
     - ``glance-image-import.conf``
   * - configuration file section
     - ``[inject_metadata_properties]``

This plugin implements the Glance spec `Inject metadata properties
automatically to non-admin images`_.  One use case for this plugin is a
situation where an operator wants to put specific metadata on images
imported by end users so that virtual machines booted from these images
will be located on specific compute nodes.  Since it's unlikely that an
end user (the image owner) will know the appropriate properties or values,
an operator may use this plugin to inject the properties automatically
upon image import.

.. note:: This plugin may only be used as part of the interoperable image
   import workflow (``POST v2/images/{image_id}/import``).  *It has no
   effect on the image data upload call* (``PUT
   v2/images/{image_id}/file``).

   You can guarantee that your end users must use interoperable image
   import by restricting the ``upload_image`` policy appropriately in the
   Glance ``policy.yaml`` file.  By default, this policy is unrestricted
   (that is, any authorized user may make the image upload call).  For
   example, to allow only admin or service users to make the image upload
   call, the policy could be restricted as follows:

   .. code-block:: text

      "upload_image": "role:admin or (service_user_id:<service user's id>) or (service_roles:<service_role>)"

   where "service_role" is the role which is created for the service user
   and assigned to trusted services.

To use the Image Property Injection Plugin, the following configuration is
required.

1. You will need to configure the 'glance-image-import.conf' file as shown
   below:

   .. code-block:: ini

      [image_import_opts]
      image_import_plugins = [inject_image_metadata]

      [inject_metadata_properties]
      ignore_user_roles = admin,...
      inject = property1:value1,property2:value2,...

   The first section, ``image_import_opts``, is used to enable the plugin
   by specifying the plugin name as one of the elements of the list that
   is the value of the ``image_import_plugins`` parameter.  The plugin
   name is simply the module name under glance/async\_/flows/plugins/.

   The second section, ``inject_metadata_properties``, is where you set
   the parameters for the injection plugin.  (Note that the values you
   specify here only have an effect if the plugin has been enabled in the
   ``image_import_plugins`` list as described above.)

   * ``ignore_user_roles`` is a comma-separated list of Keystone roles
     that the plugin will ignore.  In other words, if the user making the
     image import call has any of these roles, the plugin will not inject
     any properties into the image.

   * ``inject`` is a comma-separated list of properties and values that
     will be injected into the image record for the imported image.  Each
     property and value should be separated by a colon (':') as shown in
     the example above.

2. If your use case is such that you don't want to allow end users to
   create, modify, or delete the metadata properties that you are
   injecting during the interoperable image import process, you will need
   to protect these properties using the Glance property protection
   feature (available since the Havana release).
For example, suppose there is a property named 'property1' that you want
injected during import, but you only want an administrator or service user
to be able to create this property, and you want only an administrator to
be able to modify or delete it.  You could accomplish this by adding the
following to the property protection configuration file:

.. code-block:: ini

   [property1]
   create = admin,service_role
   read = admin,service_role,member,_member_
   update = admin
   delete = admin

See the :ref:`property-protections` section of this Guide for more
information.

The Image Conversion
--------------------

.. list-table::

   * - release introduced
     - Rocky (Glance 17.0.0)
   * - configuration file
     - ``glance-image-import.conf``
   * - configuration file section
     - ``[image_conversion]``

This plugin implements automated image conversion for Interoperable Image
Import.  One use case for this plugin would be environments where Ceph is
used as the image back end and operators want to optimize the back-end
capabilities by ensuring that all images are in raw format, without
putting the burden of converting the images on their end users.

.. note:: This plugin may only be used as part of the interoperable image
   import workflow (``POST v2/images/{image_id}/import``).  *It has no
   effect on the image data upload call* (``PUT
   v2/images/{image_id}/file``).

   You can guarantee that your end users must use interoperable image
   import by restricting the ``upload_image`` policy appropriately in the
   Glance ``policy.yaml`` file.  By default, this policy is unrestricted
   (that is, any authorized user may make the image upload call).  For
   example, to allow only admin or service users to make the image upload
   call, the policy could be restricted as follows:

   .. code-block:: text

      "upload_image": "role:admin or (service_user_id:<service user's id>) or (service_roles:<service_role>)"

   where "service_role" is the role which is created for the service user
   and assigned to trusted services.

To use the Image Conversion Plugin, the following configuration is
required.

You will need to configure the 'glance-image-import.conf' file as shown
below:

.. code-block:: ini

   [image_import_opts]
   image_import_plugins = ['image_conversion']

   [image_conversion]
   output_format = raw

.. note:: The default output format is raw, in which case there is no need
   to have the 'image_conversion' section and its 'output_format' defined
   in the config file.  The input format needs to be one of the `qemu-img
   supported ones`_ for this feature to work.  If the qemu-img call fails
   on the source image, the import process will fail when the
   'image_conversion' plugin is enabled.

.. note:: The ``image_import_plugins`` config option is a list, and
   multiple plugins can be enabled for the import flow.  The plugins are
   not run in parallel.  You can enable multiple plugins by configuring
   them in ``glance-image-import.conf``, for example as follows:

   .. code-block:: ini

      [image_import_opts]
      image_import_plugins = ['inject_image_metadata', 'image_conversion']

      [inject_metadata_properties]
      ignore_user_roles = admin,...
      inject = "property1":"value1","property2":"value2",...

      [image_conversion]
      output_format = raw
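If you want to confirm that a conversion actually took place, you can
inspect the stored image data with ``qemu-img``.  This is an illustrative
check only, assuming a filesystem store whose data directory is
``/var/lib/glance/images`` (the path and sizes shown are placeholders):

.. code-block:: console

   $ qemu-img info /var/lib/glance/images/<image_id>
   image: /var/lib/glance/images/<image_id>
   file format: raw
   virtual size: 2 GiB (2147483648 bytes)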
The Image Decompression
-----------------------

.. list-table::

   * - release introduced
     - Ussuri (Glance 20.0.0)
   * - configuration file
     - ``glance-image-import.conf``

This plugin implements automated image decompression for Interoperable
Image Import.  One use case for this plugin would be environments where a
user or operator wants to use the 'web-download' method and the image
provider supplies only compressed images.

.. note:: This plugin may only be used as part of the interoperable image
   import workflow (``POST v2/images/{image_id}/import``).  *It has no
   effect on the image data upload call* (``PUT
   v2/images/{image_id}/file``).

   You can guarantee that your end users must use interoperable image
   import by restricting the ``upload_image`` policy appropriately in the
   Glance ``policy.yaml`` file.  By default, this policy is unrestricted
   (that is, any authorized user may make the image upload call).  For
   example, to allow only admin or service users to make the image upload
   call, the policy could be restricted as follows:

   .. code-block:: text

      "upload_image": "role:admin or (service_user_id:<service user's id>) or (service_roles:<service_role>)"

   where "service_role" is the role which is created for the service user
   and assigned to trusted services.

.. note:: The plugin will not decompress images whose
   ``container_format`` is set to ``compressed``.  This is to maintain the
   original intent of the image creator.

To use the Image Decompression Plugin, the following configuration is
required.

You will need to add "image_decompression" to the
'glance-image-import.conf' file as shown below:

.. code-block:: ini

   [image_import_opts]
   image_import_plugins = ['image_decompression']

.. note:: The supported archive types for Image Decompression are zip,
   lha/lzh and gzip.  Currently the plugin does not support multi-layered
   archives (like tar.gz).  Lha/lzh is only supported if the python3
   ``lhafile`` dependency library is installed; without it, any import job
   that supplies an lha file will fail.  (In this case we know the image
   won't be bootable, as it is compressed and we have no means to
   decompress it.)

.. note:: The ``image_import_plugins`` config option is a list, and
   multiple plugins can be enabled for the import flow.  The plugins are
   not run in parallel.  You can enable multiple plugins by configuring
   them in ``glance-image-import.conf``, for example as follows:

   .. code-block:: ini

      [image_import_opts]
      image_import_plugins = ['image_decompression', 'image_conversion']

      [image_conversion]
      output_format = raw

   If Image Conversion is used together with this plugin, decompression
   must happen first; this is ensured by the ordering of the plugins.

.. _glance-api.conf: https://opendev.org/openstack/glance/src/branch/master/etc/glance-api.conf
.. _glance-image-import.conf.sample: https://opendev.org/openstack/glance/src/branch/master/etc/glance-image-import.conf.sample
.. _`Image Import Refactor`: https://specs.openstack.org/openstack/glance-specs/specs/mitaka/approved/image-import/image-import-refactor.html
.. _`Image Service API reference`: https://docs.openstack.org/api-ref/image/
.. _`Inject metadata properties automatically to non-admin images`: https://specs.openstack.org/openstack/glance-specs/specs/queens/approved/glance/inject-automatic-metadata.html
.. _`Interoperable Image Import`: https://docs.openstack.org/api-ref/image/v2/index.html#interoperable-image-import
.. _OSSN-0078: https://wiki.openstack.org/wiki/OSSN/OSSN-0078
.. _`Stevedore`: https://docs.openstack.org/stevedore
.. _`Taskflow`: https://docs.openstack.org/taskflow
.. _`Taskflow "Task" object`: https://docs.openstack.org/taskflow/latest/user/atoms.html#task
..
_`qemu-img supported ones`: https://github.com/qemu/qemu/blob/master/qemu-img.texi#L599-L725 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/manage-images.rst0000664000175000017500000003202100000000000021436 0ustar00zuulzuul00000000000000============= Manage images ============= The cloud operator assigns roles to users. Roles determine who can upload and manage images. The operator might restrict image upload and management to only cloud administrators or operators. You can upload images through the :command:`glance image-create` or :command:`glance image-create-via-import` command or the Image service API. You can use the ``glance`` client for the image management. It provides mechanisms to do all operations supported by the Images API v2. After you upload an image, you cannot change the content, but you can update the metadata. For details about image creation, see the `Virtual Machine Image Guide `__. List or get details for images (glance) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To get a list of images and to get further details about a single image, use :command:`glance image-list` and :command:`glance image-show` commands. .. code-block:: console $ glance image-list +--------------------------------------+---------------------------------+ | ID | Name | +--------------------------------------+---------------------------------+ | dfc1dfb0-d7bf-4fff-8994-319dd6f703d7 | cirros-0.3.5-x86_64-uec | | a3867e29-c7a1-44b0-9e7f-10db587cad20 | cirros-0.3.5-x86_64-uec-kernel | | 4b916fba-6775-4092-92df-f41df7246a6b | cirros-0.3.5-x86_64-uec-ramdisk | | d07831df-edc3-4817-9881-89141f9134c3 | myCirrosImage | +--------------------------------------+---------------------------------+ .. code-block:: console $ glance image-show d07831df-edc3-4817-9881-89141f9134c3 +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | 443b7623e27ecf03dc9e01ee93f67afe | | container_format | ami | | created_at | 2016-08-11T15:07:26Z | | disk_format | ami | | file | /v2/images/d07831df-edc3-4817-9881-89141f9134c3/file | | id | d07831df-edc3-4817-9881-89141f9134c3 | | min_disk | 0 | | min_ram | 0 | | name | myCirrosImage | | os_hash_algo | sha512 | | os_hash_value | 6513f21e44aa3da349f248188a44bc304a3653a04122d8fb4535 | | | 423c8e1d14cd6a153f735bb0982e2161b5b5186106570c17a9e5 | | | 8b64dd39390617cd5a350f78 | | os_hidden | False | | owner | d88310717a8e4ebcae84ed075f82c51e | | protected | False | | schema | /v2/schemas/image | | size | 13287936 | | status | active | | tags | | | updated_at | 2016-08-11T15:20:02Z | | virtual_size | None | | visibility | private | +------------------+------------------------------------------------------+ When viewing a list of images, you can also use ``grep`` to filter the list, as follows: .. code-block:: console $ glance image-list | grep 'cirros' | dfc1dfb0-d7bf-4fff-8994-319dd6f703d7 | cirros-0.3.5-x86_64-uec | | a3867e29-c7a1-44b0-9e7f-10db587cad20 | cirros-0.3.5-x86_64-uec-kernel | | 4b916fba-6775-4092-92df-f41df7246a6b | cirros-0.3.5-x86_64-uec-ramdisk | Create or update an image (glance) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To create an image, use :command:`glance image-create`: .. code-block:: console $ glance image-create --name imageName To update an image, you must specify its ID and use :command:`glance image-update`: .. 
code-block:: console

   $ glance image-update <IMAGE_ID> --property x="y"

The following list explains the commonly used properties that you can set
or modify when using the ``image-create`` and ``image-update`` commands.
For more information, refer to the `OpenStack Useful Image Properties
<https://docs.openstack.org/glance/latest/admin/useful-image-properties.html>`_.

``--architecture <ARCHITECTURE>``
  Operating system architecture as specified in
  https://docs.openstack.org/glance/latest/admin/useful-image-properties.html

``--protected [True|False]``
  If true, image will not be deletable.

``--name <NAME>``
  Descriptive name for the image

``--instance-uuid <INSTANCE_UUID>``
  Metadata which can be used to record which instance this image is
  associated with.  (Informational only, does not create an instance
  snapshot.)

``--min-disk <DISK_GB>``
  Amount of disk space (in GB) required to boot image.

``--visibility <VISIBILITY>``
  Scope of image accessibility.  Valid values: ``public``, ``private``,
  ``community``, ``shared``

``--kernel-id <KERNEL_ID>``
  ID of image stored in Glance that should be used as the kernel when
  booting an AMI-style image.

``--os-version <OS_VERSION>``
  Operating system version as specified by the distributor

``--disk-format <DISK_FORMAT>``
  Format of the disk.  May not be modified once an image has gone to
  ``active`` status.  Valid values: ``ami``, ``ari``, ``aki``, ``vhd``,
  ``vhdx``, ``vmdk``, ``raw``, ``qcow2``, ``vdi``, ``iso``, ``ploop``

``--os-distro <OS_DISTRO>``
  Common name of operating system distribution as specified in
  https://docs.openstack.org/glance/latest/admin/useful-image-properties.html

``--owner <PROJECT_ID>``
  Owner of the image.  Usually, may be set by an admin only.

``--ramdisk-id <RAMDISK_ID>``
  ID of image stored in Glance that should be used as the ramdisk when
  booting an AMI-style image.

``--min-ram <RAM_MB>``
  Amount of ram (in MB) required to boot image.

``--container-format <CONTAINER_FORMAT>``
  Format of the container.  May not be modified once an image has gone to
  ``active`` status.  Valid values: ``ami``, ``ari``, ``aki``, ``bare``,
  ``ovf``, ``ova``, ``docker``, ``compressed``

``--hidden [True|False]``
  If true, image will not appear in default image list response.

``--property <key=value>``
  Arbitrary property to associate with image.  May be used multiple times.

``--remove-property key``
  Name of arbitrary property to remove from the image.

The following example shows the command that you would use to upload a
CentOS 6.3 image in qcow2 format and configure it for public access:

.. code-block:: console

   $ glance image-create --disk-format qcow2 --container-format bare \
     --visibility public --file ./centos63.qcow2 --name centos63-image

The following example shows how to update an existing image with
properties that describe the disk bus, the CD-ROM bus, and the VIF model:

.. note:: When you use OpenStack with VMware vCenter Server, you need to
   specify the ``vmware_disktype`` and ``vmware_adaptertype`` properties
   with :command:`glance image-create`.  Also, we recommend that you set
   the ``hypervisor_type="vmware"`` property.  For more information, see
   `Images with VMware vSphere `_ in the OpenStack Configuration
   Reference.

.. code-block:: console

   $ glance image-update \
       --property hw_disk_bus=scsi \
       --property hw_cdrom_bus=ide \
       --property hw_vif_model=e1000 \
       <IMAGE_ID>

Currently the libvirt virtualization tool determines the disk, CD-ROM,
and VIF device models based on the configured hypervisor type
(``libvirt_type`` in the ``/etc/nova/nova.conf`` file).  For the sake of
optimal performance, libvirt defaults to using virtio for both disk and
VIF (NIC) models.
The disadvantage of this approach is that it is not possible to run
operating systems that lack virtio drivers, for example, BSD, Solaris, and
older versions of Linux and Windows.

If you specify a disk or CD-ROM bus model that is not supported, see the
Disk_and_CD-ROM_bus_model_values_table_.  If you specify a VIF model that
is not supported, the instance fails to launch.  See the
VIF_model_values_table_.

The valid model values depend on the ``libvirt_type`` setting, as shown in
the following tables.

.. _Disk_and_CD-ROM_bus_model_values_table:

**Disk and CD-ROM bus model values**

+-------------------------+--------------------------+
| libvirt\_type setting   | Supported model values   |
+=========================+==========================+
| qemu or kvm             | * fdc                    |
|                         |                          |
|                         | * ide                    |
|                         |                          |
|                         | * scsi                   |
|                         |                          |
|                         | * sata                   |
|                         |                          |
|                         | * virtio                 |
|                         |                          |
|                         | * usb                    |
+-------------------------+--------------------------+
| xen                     | * ide                    |
|                         |                          |
|                         | * xen                    |
+-------------------------+--------------------------+

.. _VIF_model_values_table:

**VIF model values**

+-------------------------+--------------------------+
| libvirt\_type setting   | Supported model values   |
+=========================+==========================+
| qemu or kvm             | * e1000                  |
|                         |                          |
|                         | * ne2k\_pci              |
|                         |                          |
|                         | * pcnet                  |
|                         |                          |
|                         | * rtl8139                |
|                         |                          |
|                         | * virtio                 |
+-------------------------+--------------------------+
| xen                     | * e1000                  |
|                         |                          |
|                         | * netfront               |
|                         |                          |
|                         | * ne2k\_pci              |
|                         |                          |
|                         | * pcnet                  |
|                         |                          |
|                         | * rtl8139                |
+-------------------------+--------------------------+
| vmware                  | * VirtualE1000           |
|                         |                          |
|                         | * VirtualPCNet32         |
|                         |                          |
|                         | * VirtualVmxnet          |
+-------------------------+--------------------------+

.. note:: By default, hardware properties are retrieved from the image
   properties.  However, if this information is not available, the
   ``libosinfo`` database provides an alternative source for these values.

   If the guest operating system is not in the database, or if the use of
   ``libosinfo`` is disabled, the default system values are used.

   Users can set the operating system ID or a ``short-id`` in image
   properties.  For example:

   .. code-block:: console

      $ glance image-update --property short-id=fedora23 \
        <IMAGE_ID>

Create an image from ISO image
------------------------------

You can upload ISO images to the Image service (glance).  You can
subsequently boot an ISO image using Compute.

In the Image service, run the following command:

.. code-block:: console

   $ glance image-create --name ISO_IMAGE --file IMAGE.iso \
     --disk-format iso --container-format bare

Optionally, to confirm the upload in Image service, run:

.. code-block:: console

   $ glance image-list

Troubleshoot image creation
~~~~~~~~~~~~~~~~~~~~~~~~~~~

If you encounter problems in creating an image in the Image service or
Compute, the following information may help you troubleshoot the creation
process.

* Ensure that the version of qemu you are using is version 0.14 or later.
  Earlier versions of qemu result in an ``unknown option -s`` error
  message in the ``/var/log/nova/nova-compute.log`` file.

* Examine the ``/var/log/nova/nova-api.log`` and
  ``/var/log/nova/nova-compute.log`` log files for error messages.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/multistores.rst0000664000175000017500000001664500000000000021343 0ustar00zuulzuul00000000000000..
    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

.. _multi_stores:

Multi Store Support
===================

.. note:: The Multi Store feature was introduced as EXPERIMENTAL in Rocky
   and is now fully supported in the Train release.

Scope of this document
----------------------

This page describes how to enable multiple stores in glance.

Prerequisites
-------------

* Glance version 17.0.0 or later

* Glance Store Library 0.25.0 or later

* Glance not using the Glance Registry

* Available backends

Procedure
---------

In this section, we discuss what configuration options are available to
operators to enable multiple store support.

* in the ``[DEFAULT]`` options group:

  * ``enabled_backends`` must be set as key:value pairs, where the key is
    the identifier for the store and the value is the type of the store.
    Valid types are ``file``, ``http``, ``rbd``, ``swift``, ``cinder`` or
    ``vmware``.  To enable multiple stores, the operator can specify
    multiple key:value pairs separated by commas.

    .. warning:: The store identifier prefix ``os_glance_`` is reserved.
       If you define a store identifier with this prefix, the glance
       service will refuse to start.

    The http store type is always treated by Glance as a read-only store.
    This is indicated in the response to the ``/v2/stores/info`` call,
    where an http type store will have the attribute ``read-only: True``
    in addition to the usual ``id`` and ``description`` fields.

    .. code-block:: ini

       [DEFAULT]
       enabled_backends = fast:rbd, cheap:rbd, shared:file, reliable:file

* in the ``[glance_store]`` options group:

  * ``default_backend`` must be set to one of the identifiers defined by
    the ``enabled_backends`` option.  If ``default_backend`` is not set,
    or does not refer to one of the valid store drivers, the glance api
    service will not start.

    .. code-block:: ini

       [glance_store]
       default_backend = fast

* For each store identifier defined in ``enabled_backends``, the operator
  needs to add a new config group that defines the config options related
  to that particular store.

  .. code-block:: ini

     [shared]
     filesystem_store_datadir = /opt/stack/data/glance/shared_images/
     store_description = "Shared filesystem store"

     [reliable]
     filesystem_store_datadir = /opt/stack/data/glance/reliable
     store_description = "Reliable filesystem backend"

     [fast]
     store_description = "Fast rbd backend"
     rbd_store_chunk_size = 8
     rbd_store_pool = images
     rbd_store_user = admin
     rbd_store_ceph_conf = /etc/ceph/ceph.conf
     rados_connect_timeout = 0

     [cheap]
     store_description = "Cheap rbd backend"
     rbd_store_chunk_size = 8
     rbd_store_pool = images
     rbd_store_user = admin
     rbd_store_ceph_conf = /etc/ceph/ceph1.conf
     rados_connect_timeout = 0

  .. note:: ``store_description`` is a config option available in each
     store section, where the operator can add a meaningful description of
     that store.  This description is displayed in the GET /v2/info/stores
     response.

Store Configuration Issues
~~~~~~~~~~~~~~~~~~~~~~~~~~

Please keep the following points in mind.

* Due to the special read-only nature and characteristics of the http
  store type, configuring multiple instances of the http type store
  **is not supported**.
  (This constraint is not currently enforced in the code.)

* Each instance of the filesystem store **must** have a different value
  for the ``filesystem_store_datadir``.  (This constraint is not currently
  enforced in the code.)

.. _reserved_stores:

Reserved Stores
---------------

With the Train release, Glance is beginning a transition from its former
reliance upon local directories for temporary data storage to the ability
to use backend stores accessed via the glance_store library.  In the Train
release, the use of backend stores for this purpose is optional **unless
you are using the multi store support feature**.  Since you are reading
this document, this situation most likely applies to you.

.. note:: Currently, only the filesystem store type is supported as a
   Glance reserved store.

The reserved stores are not intended to be exposed to end users.  Thus
they will not appear in the response to the store discovery call, GET
/v2/info/stores, or as values in the ``OpenStack-image-store-ids``
response header of the image-create call.

You do not get to select the name of a reserved store; these are defined
by Glance and begin with the prefix ``os_glance_``.  In the Train release,
you do not get to select the store type: all reserved stores must be of
type filesystem.

Currently, there are two reserved stores:

``os_glance_tasks_store``
    This store is used for the tasks engine.  It replaces the use of the
    DEPRECATED configuration option ``[task]/work_dir``.

``os_glance_staging_store``
    This store is used for the staging area for the interoperable image
    import process.  It replaces the use of the DEPRECATED configuration
    option ``[DEFAULT]/node_staging_uri``.

.. note:: If an end user wants to retrieve all the available stores using
   ``CONF.enabled_backends``, they need to explicitly remove the reserved
   stores from that list.

Configuration
~~~~~~~~~~~~~

As mentioned above, you do not get to select the name or the type of a
reserved store (though we anticipate that you will be able to configure
the store type in a future release).  The reserved stores *must* be of
type filesystem.  Hence, you must provide configuration for them in your
``glance-api.conf`` file.  You do this by introducing a section in
``glance-api.conf`` for each reserved store as follows:

.. code-block:: ini

   [os_glance_tasks_store]
   filesystem_store_datadir = /var/lib/glance/tasks_work_dir

   [os_glance_staging_store]
   filesystem_store_datadir = /var/lib/glance/staging

Since these are both filesystem stores (remember, you do not get a choice)
the only option you must configure for each is the
``filesystem_store_datadir``.

Please keep the following points in mind:

* The path for ``filesystem_store_datadir`` used for the reserved stores
  must be **different** from the path you are using for any filesystem
  store you have listed in ``enabled_backends``.  Using the same data
  directory for multiple filesystem stores is **unsupported** and may lead
  to data loss.

* The identifiers for reserved stores, that is, ``os_glance_tasks_store``
  and ``os_glance_staging_store``, must **not** be included in the
  ``enabled_backends`` list.

* The reserved stores will **not** appear in the store discovery response
  or as values in the ``OpenStack-image-store-ids`` response header of the
  image-create call.

* The reserved stores will **not** be accepted as the value of the
  ``X-Image-Meta-Store`` header on the image-data-upload call or the
  image-import call.
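A quick way to confirm which stores are exposed to end users is the store
discovery call.  The request and the (abbreviated) response below are an
illustrative sketch; the store names match the earlier examples:

.. code-block:: console

   $ curl -s -H "X-Auth-Token: $token" $image_url/v2/info/stores
   {"stores": [{"id": "fast", "description": "Fast rbd backend", "default": true},
               {"id": "cheap", "description": "Cheap rbd backend"}]}

Note that ``os_glance_tasks_store`` and ``os_glance_staging_store`` do not
appear in the response.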
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/new-location-apis.rst0000664000175000017500000001161500000000000022302 0ustar00zuulzuul00000000000000..
    Copyright 2024 RedHat Inc.
    All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

.. _new_location_apis:

New Location APIs Support
=========================

Version 2.17 of the Image Service API introduces new Location API calls
which mitigate the security issues `OSSN-0090
<https://wiki.openstack.org/wiki/OSSN/OSSN-0090>`_ and `OSSN-0065
<https://wiki.openstack.org/wiki/OSSN/OSSN-0065>`_.

Below are the two new location APIs glance introduced in the 2023.2 cycle.

Add Location
------------

The add location API adds a location to an image.  The add location
operation is allowed only for service-to-service interaction and for the
image owner, and only while the image is in the ``queued`` state.
Attempts to add a location to an image in any other state will be
rejected.  This is done in order to prevent malicious users from modifying
the image location again and again, since the location added the first
time is the correct one as far as Glance is concerned.

The use case for the old location API for consumers (nova and cinder) is
to create images efficiently with an optimized workflow.  That workflow
avoids the hash calculation steps which exist in the generic image-create
workflow of glance, leading to missing checksum and hash information for
those images.  As a result, those images were never cached, as a checksum
was required to validate whether an image is completely cached or not.
Adding a mechanism to calculate the checksum and hash for the image has
not only resolved this issue, but will also improve caching operations,
since the checksum of the original and a cached image is compared only
once the entire image has been downloaded into the cache.

As the hashing calculation and its verification are time-consuming, we
provide a configuration option to enable/disable this operation.  The new
configuration option ``do_secure_hash`` has been introduced to control
this operation.  The value of ``do_secure_hash`` is ``True`` by default.
This operation can be disabled by turning this flag to ``False``.  For
similar reasons, the hashing calculation is performed in the background,
so that consumers or clients need not wait for its completion.  If the
hash calculation fails, a retry mechanism will retry the operation as per
the value of the configuration option ``http_retries`` in the
glance-api.conf file.  The default value is ``3``.  The operation will be
silently ignored if it fails even after the maximum retries as defined
with the ``http_retries`` configuration option.

Similar to the old location API, users (not consumers like Nova or Cinder)
can also pass hashing values as an input to this new API using
``validation_data``; it can be supplied from the glance client as a
command-line argument, or provided in the request body when making a
direct API request.  In this case, if hashing is enabled in the deployment
(i.e., ``do_secure_hash`` is True), the calculated hash values are
validated against ``validation_data``, and the operation is marked as
failed if there is a difference.  If hashing is disabled (i.e.,
``do_secure_hash`` is False), the values provided in ``validation_data``
are set directly on the image.
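For illustration, a direct API request that adds a location with
``validation_data`` might look like the following sketch (the URL and the
hash value are placeholders, not working values):

.. code-block:: console

   $ curl -i -X POST -H "X-Auth-Token: $token" \
          -H "Content-Type: application/json" \
          -d '{"url": "http://example.com/my-image.qcow2",
               "validation_data": {"os_hash_algo": "sha512",
                                   "os_hash_value": "<hexdigest>"}}' \
          $image_url/v2/images/{image_id}/locations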
If hashing is disabled for this API, we will have an active image, but it
will again fail to cache; so Glance recommends that consumers like Nova
and Cinder, as well as normal users, keep ``do_secure_hash`` enabled.

.. note:: Usage of this API by end users is only allowed if the http store
   is enabled in the deployment.

.. note:: In the case of the ``http`` store, if a bad value is passed for
   ``os_hash_value`` in the validation data, the image remains in the
   ``queued`` state because verification of ``validation_data`` fails,
   which is expected; but the location of the image is still stored, and
   it should be removed instead.  The location doesn't get deleted because
   deletion of locations is not allowed for the ``http`` store.  In this
   situation the image needs to be deleted, as it is of no use.

Get Locations
-------------

The get locations API returns the list of the locations associated with an
image.  This API is introduced to abstract the location information from
end users, so that they are not able to see where exactly the image is
stored.  The get locations operation is strictly limited to
service-to-service interaction, meaning only consumers like nova and
cinder are able to access this API.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/notifications.rst0000664000175000017500000001236600000000000021626 0ustar00zuulzuul00000000000000..
    Copyright 2011-2013 OpenStack Foundation
    All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

.. _notifications:

Notifications
=============

Notifications can be generated for several events in the image lifecycle.
These can be used for auditing, troubleshooting, etc.

Notification Drivers
--------------------

* log

  This driver uses the standard Python logging infrastructure, with the
  notifications ending up in the file specified by the ``log_file``
  configuration directive.

* messaging

  This strategy sends notifications to a message queue configured using
  oslo.messaging configuration options.

* noop

  This strategy produces no notifications.  It is the default strategy.

Notification Types
------------------

* ``image.create``

  Emitted when an image record is created in Glance.  Image record
  creation is independent of image data upload.

* ``image.prepare``

  Emitted when Glance begins uploading image data to its store.

* ``image.upload``

  Emitted when Glance has completed the upload of image data to its store.

* ``image.activate``

  Emitted when an image goes to `active` status.  This occurs when Glance
  knows where the image data is located.

* ``image.send``

  Emitted upon completion of an image being sent to a consumer.
* ``image.update``

  Emitted when an image record is updated in Glance.

* ``image.delete``

  Emitted when an image is deleted from Glance.

* ``task.run``

  Emitted when a task is picked up by the executor to be run.

* ``task.processing``

  Emitted when a task is sent over to the executor to begin processing.

* ``task.success``

  Emitted when a task is successfully completed.

* ``task.failure``

  Emitted when a task fails.

Content
-------

Every message contains a handful of attributes.

* message_id

  UUID identifying the message.

* publisher_id

  The hostname of the glance instance that generated the message.

* event_type

  Event that generated the message.

* priority

  One of WARN, INFO or ERROR.

* timestamp

  UTC timestamp of when the event was generated.

* payload

  Data specific to the event type.

Payload
-------

* image.send

  The payload for INFO, WARN, and ERROR events contains the following:

  image_id
    ID of the image (UUID)
  owner_id
    Tenant or User ID that owns this image (string)
  receiver_tenant_id
    Tenant ID of the account receiving the image (string)
  receiver_user_id
    User ID of the account receiving the image (string)
  destination_ip
    The receiver's IP address to which the image was sent (string)
  bytes_sent
    The number of bytes actually sent

* image.create

  For INFO events, it is the image metadata.  WARN and ERROR events
  contain a text message in the payload.

* image.prepare

  For INFO events, it is the image metadata.  WARN and ERROR events
  contain a text message in the payload.

* image.upload

  For INFO events, it is the image metadata.  WARN and ERROR events
  contain a text message in the payload.

* image.activate

  For INFO events, it is the image metadata.  WARN and ERROR events
  contain a text message in the payload.

* image.update

  For INFO events, it is the image metadata.  WARN and ERROR events
  contain a text message in the payload.

* image.delete

  For INFO events, it is the image id.  WARN and ERROR events contain a
  text message in the payload.

* task.run

  The payload for INFO, WARN, and ERROR events contains the following:

  task_id
    ID of the task (UUID)
  owner
    Tenant or User ID that created this task (string)
  task_type
    Type of the task, for example "import" (string)
  status
    Status of the task.  Status can be "pending", "processing", "success"
    or "failure". (string)
  task_input
    Input provided by the user when attempting to create a task. (dict)
  result
    Resulting output from a successful task. (dict)
  message
    Message shown in the task if it fails.  None if the task succeeds.
    (string)
  expires_at
    UTC time at which the task would not be visible to the user. (string)
  created_at
    UTC time at which the task was created. (string)
  updated_at
    UTC time at which the task was last updated. (string)

  The exceptions are: for INFO events, it is the task dict with result and
  message as None.  WARN and ERROR events contain a text message in the
  payload.

* task.processing

  For INFO events, it is the task dict with result and message as None.
  WARN and ERROR events contain a text message in the payload.

* task.success

  For INFO events, it is the task dict with message as None and result as
  a dict.  WARN and ERROR events contain a text message in the payload.

* task.failure

  For INFO events, it is the task dict with result as None and message as
  text.  WARN and ERROR events contain a text message in the payload.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/os_hash_algo.rst0000664000175000017500000000244200000000000021375 0ustar00zuulzuul00000000000000..
    This work is licensed under a Creative Commons Attribution 3.0 Unported
    License.

    http://creativecommons.org/licenses/by/3.0/legalcode

=========================================
Secure Hash Algorithm Support (Multihash)
=========================================

The Secure Hash Algorithm feature supplements the current ‘checksum’
image property with a self-describing secure hash.  The self-description
consists of two new image properties:

``os_hash_algo``
    Contains the name of the secure hash algorithm used to generate the
    value on the image

``os_hash_value``
    The hexdigest computed by applying the secure hash algorithm named in
    the ``os_hash_algo`` property to the image data

Hash Algorithm Configuration
============================

``os_hash_algo`` will be populated by the value of the configuration
option ``hashing_algorithm`` in the ``glance.conf`` file.  The
``os_hash_value`` value will be populated by the hexdigest computed when
the algorithm is applied to the uploaded or imported image data.

These are read-only image properties and are not user-modifiable.

The default secure hash algorithm is SHA-512.  It should be suitable for
most applications.

The multihash is computed only for new images.  There is no provision for
computing the multihash for existing images.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/policies.rst0000664000175000017500000001411100000000000020552 0ustar00zuulzuul00000000000000..
    Copyright 2012 OpenStack Foundation
    All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

Policies
========

.. warning:: The JSON formatted policy file is deprecated since Glance
   22.0.0 (Wallaby).  This `oslopolicy-convert-json-to-yaml`__ tool will
   migrate your existing JSON-formatted policy file to YAML in a
   backward-compatible way.

.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html

Glance's public API calls may be restricted to certain sets of users using
a policy configuration file.  This document explains exactly how policies
are configured and what they apply to.

A policy is composed of a set of rules that are used by the policy "Brain"
in determining if a particular action may be performed by the authorized
tenant.

Constructing a Policy Configuration File
----------------------------------------

A policy configuration file is simply a JSON object that contains sets of
rules.  Each top-level key is the name of a rule.  Each rule is a string
that describes an action that may be performed in the Glance API.
The actions that may have a rule enforced on them are:

* ``get_images`` - List available image entities

  * ``GET /v1/images``
  * ``GET /v1/images/detail``
  * ``GET /v2/images``

* ``get_image`` - Retrieve a specific image entity

  * ``HEAD /v1/images/<IMAGE_ID>``
  * ``GET /v1/images/<IMAGE_ID>``
  * ``GET /v2/images/<IMAGE_ID>``

* ``download_image`` - Download binary image data

  * ``GET /v1/images/<IMAGE_ID>``
  * ``GET /v2/images/<IMAGE_ID>/file``

* ``upload_image`` - Upload binary image data

  * ``POST /v1/images``
  * ``PUT /v1/images/<IMAGE_ID>``
  * ``PUT /v2/images/<IMAGE_ID>/file``

* ``copy_from`` - Copy binary image data from URL

  * ``POST /v1/images``
  * ``PUT /v1/images/<IMAGE_ID>``

* ``add_image`` - Create an image entity

  * ``POST /v1/images``
  * ``POST /v2/images``

* ``modify_image`` - Update an image entity

  * ``PUT /v1/images/<IMAGE_ID>``
  * ``PUT /v2/images/<IMAGE_ID>``

* ``publicize_image`` - Create or update public images

  * ``POST /v1/images`` with attribute ``is_public`` = ``true``
  * ``PUT /v1/images/<IMAGE_ID>`` with attribute ``is_public`` = ``true``
  * ``POST /v2/images`` with attribute ``visibility`` = ``public``
  * ``PUT /v2/images/<IMAGE_ID>`` with attribute ``visibility`` = ``public``

* ``communitize_image`` - Create or update community images

  * ``POST /v2/images`` with attribute ``visibility`` = ``community``
  * ``PUT /v2/images/<IMAGE_ID>`` with attribute ``visibility`` = ``community``

* ``delete_image`` - Delete an image entity and associated binary data

  * ``DELETE /v1/images/<IMAGE_ID>``
  * ``DELETE /v2/images/<IMAGE_ID>``

* ``add_member`` - Add a membership to the member repo of an image

  * ``POST /v2/images/<IMAGE_ID>/members``

* ``get_members`` - List the members of an image

  * ``GET /v1/images/<IMAGE_ID>/members``
  * ``GET /v2/images/<IMAGE_ID>/members``

* ``delete_member`` - Delete a membership of an image

  * ``DELETE /v1/images/<IMAGE_ID>/members/<MEMBER_ID>``
  * ``DELETE /v2/images/<IMAGE_ID>/members/<MEMBER_ID>``

* ``modify_member`` - Create or update the membership of an image

  * ``PUT /v1/images/<IMAGE_ID>/members/<MEMBER_ID>``
  * ``PUT /v1/images/<IMAGE_ID>/members``
  * ``POST /v2/images/<IMAGE_ID>/members``
  * ``PUT /v2/images/<IMAGE_ID>/members/<MEMBER_ID>``

* ``manage_image_cache`` - Allowed to use the image cache management API

To limit an action to a particular role or roles, you list the roles like
so ::

    {
        "delete_image": ["role:admin", "role:superuser"]
    }

The above would add a rule that only allowed users that had roles of
either "admin" or "superuser" to delete an image.

Writing Rules
-------------

Role checks are going to continue to work exactly as they already do.  If
the role defined in the check is one that the user holds, then that will
pass, e.g., ``role:admin``.

To write a generic rule, you need to know that there are three values
provided by Glance that can be used in a rule on the left side of the
colon (``:``).  Those values are the current user's credentials in the
form of:

- role
- tenant
- owner

The left side of the colon can also contain any value that Python can
understand, e.g.,:

- ``True``
- ``False``
- ``"a string"``
- &c.

Using ``tenant`` and ``owner`` will only work with images.  Consider the
following rule::

    tenant:%(owner)s

This will use the ``tenant`` value of the currently authenticated user.
It will also use ``owner`` from the image it is acting upon.  If those two
values are equivalent the check will pass.  All attributes on an image (as
well as extra image properties) are available for use on the right side of
the colon.
The most useful are the following:

- ``owner``
- ``protected``
- ``is_public``

Therefore, you could construct a set of rules like the following::

    {
        "not_protected": "False:%(protected)s",
        "is_owner": "tenant:%(owner)s",
        "is_owner_or_admin": "rule:is_owner or role:admin",
        "not_protected_and_is_owner": "rule:not_protected and rule:is_owner",

        "get_image": "rule:is_owner_or_admin",
        "delete_image": "rule:not_protected_and_is_owner",
        "add_member": "rule:not_protected_and_is_owner"
    }

Examples
--------

Example 1. (The default policy configuration)

::

    {
        "default": ""
    }

Note that an empty rule string means that all methods of the Glance API
are callable by anyone.

Example 2. Disallow modification calls to non-admins

::

    {
        "default": "",
        "add_image": "role:admin",
        "modify_image": "role:admin",
        "delete_image": "role:admin"
    }
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/property-protections.rst0000664000175000017500000000736000000000000023206 0ustar00zuulzuul00000000000000..
    Copyright 2013 OpenStack Foundation
    All Rights Reserved.

    Licensed under the Apache License, Version 2.0 (the "License"); you may
    not use this file except in compliance with the License. You may obtain
    a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied. See the License for the specific language governing
    permissions and limitations under the License.

.. _property-protections:

Property Protections
====================

There are two types of image properties in Glance:

* Core Properties, as specified by the image schema.

* Meta Properties, which are arbitrary key/value pairs that can be added
  to an image.

Access to meta properties through Glance's public API calls may be
restricted to certain sets of users, using a property protections
configuration file.  Glance also reserves the ``os_glance`` namespace of
meta properties for its own use, and will refuse to let an API user set
any property prefixed as such.

This document explains exactly how property protections are configured and
what they apply to.

Constructing a Property Protections Configuration File
------------------------------------------------------

A property protections configuration file follows the format of the Glance
API configuration file, which consists of sections, led by a ``[section]``
header and followed by ``name = value`` entries.  Each section header is a
regular expression matching a set of properties to be protected.

.. note:: Section headers must compile to a valid regular expression,
   otherwise the glance api service will not start.  Regular expressions
   will be handled by Python's ``re`` module, which uses Perl-like syntax.

Each section describes four key-value pairs, where the key is one of
``create/read/update/delete``, and the value is a comma-separated list of
user roles that are permitted to perform that operation in the Glance API.
**If any of the keys are not specified, then the glance api service will
not start successfully.**

In the list of user roles, ``@`` means all roles and ``!`` means no role.
**If both @ and ! are specified for the same rule, then the glance api
service will not start.**

.. note:: Only one policy rule is allowed per property operation.
   **If multiple are specified, then the glance api service will not
   start.**

The path to the file should be specified in the ``[DEFAULT]`` section of
``glance-api.conf`` as follows. ::

    property_protection_file=/path/to/file

If this config value is not specified, property protections are not
enforced.  **If the path is invalid, the glance api service will not start
successfully.**

The file may use either roles or policies to describe the property
protections.  The config value should be specified in the ``[DEFAULT]``
section of ``glance-api.conf`` as follows. ::

    property_protection_rule_format=<roles|policies>

The default value for ``property_protection_rule_format`` is ``roles``.

Property protections are applied in the order specified in the
configuration file.  This means that if, for example, you specify a
section with ``[.*]`` at the top of the file, all subsequent sections will
be ignored.

If a property does not match any of the given rules, all operations will
be disabled for all roles.

If an operation is misspelled or omitted, that operation will be disabled
for all roles.

Disallowing ``read`` operations will also disallow ``update/delete``
operations.

A successful HTTP request will return status ``200 OK``.  If the user is
not permitted to perform the requested action, ``403 Forbidden`` will be
returned.
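Putting these rules together, a small illustrative configuration in the
default ``roles`` format might look like the following (the property
namespace and the ``billing`` role are hypothetical):

.. code-block:: ini

   [^x_billing_.*]
   create = admin,billing
   read = @
   update = admin
   delete = !

Here any role may read properties matching ``^x_billing_.*``, only the
admin or billing roles may create them, only admin may update them, and,
per the ``!`` value, no role may delete them.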
See the Keystone docs for more information on `unified limits `_. Quota Resource Types -------------------- Glance supports quota limits on multiple areas of resource consumption. Limits are enforced at the time in which resource consumption is attempted, so setting an existing user's quota for any item below the current usage will only prevent them from consuming *more* data until they free up space. Total Image Size ~~~~~~~~~~~~~~~~ The ``image_size_total`` limit defines the maximum amount of storage (in MiB) that the tenant may consume across all of their active images. Images with multiple locations contribute to this count according to the number of places the image is stored. Thus, if you have a single 1GiB image stored in four locations, the usage will be considered to be 4GiB. Total Staging Size ~~~~~~~~~~~~~~~~~~ The :ref:`iir` function uses a two-step upload process, whereby a user first uploads an image into the *staging* store, and then subsequently *imports* the image to the final destination(s). The staging store is generally local storage on the API workers themselves, and thus is likely at somewhat of a premium, compared to the bulk shared storage allocated for general images. The ``image_stage_total`` limit defines the total amount of staging space that may be used. This should be set to a value sufficient to allow a user to import one or more images at the same time, according to your desired level of parallelism. It may be appropriate to provide the user with a very generous ``image_size_total`` quota, but a relatively restrictive ``image_stage_total`` allocation, effectively limiting them to one image being imported at any given point. Keep in mind that images being imported using the ``web-download`` method will need to fit within this allocation as well, as those are first downloaded to the staging store before being imported to the final destination(s). Images being copied from one store to another using the ``copy-image`` method are similarly affected. Note that the conventional image upload method does not stage the image, and thus is not impacted by this limit. Total Number of Images ~~~~~~~~~~~~~~~~~~~~~~ The ``image_count_total`` limit controls the maximum number of image objects that the user may have, regardless of the individual or collective sizes or impact to storage. This limit may be useful if you wish to prevent users from taking thousands of small server snapshots without ever deleting them. Total Number of In-Progress Uploads ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Because Glance can not enforce storage-focused quotas until after a stream is finished, it may be useful to limit the number of parallel upload operations that can be in-progress at any single point. The ``image_count_uploading`` limit provides this control, and affects conventional image upload, pre-import stage (including ``web-download`` and ``glance-direct``), as well as any ``copy-image`` operations that may be pending. It may be desirable to limit untrusted users to a single in-progress image upload, which will limit the amount of damage a malicious user may be able to inflict on your image storage if they initiate multiple simultaneous unbounded upload streams. Quota Strategies ---------------- Below are a couple of use-case example strategies for different types of deployments. In all cases, it makes sense for ``image_size_total`` and ``image_stage_total`` to be set to at least the size of the largest image you expect a user to use. 
The global limit on a single image (see configuration item ``image_size_cap``) may be relevant as well. Users with an ``image_count_total`` of zero will be unable to create any images, and with an ``image_count_uploading`` of zero will be unable to upload data to any images. #. **Public cloud, users are billed per-byte**: In this case, it probably makes sense to set fairly high default quota limits for each of the above resource classes, allowing users to consume as much as they are willing to pay for. It still may be desirable to set ``image_stage_total`` to something modest to prevent overrunning limited staging space, if you have import enabled. #. **Private cloud, trusted users are billed by quota**: In this case, each user pays for the amount of resource they are *allowed* to consume, instead of what they *are* consuming. Generally this involves billing total space, so ``image_size_total`` is set to their allotment, potentially with some upper bound on total images via ``image_count_total``. If they are somewhat trusted or low-impact customers, limiting the staging usage and upload count is probably not necessary, and can be left unbounded or set to some high upper bound. #. **Private cloud, semi-trusted third party users**: This case may be similar to either of the above in terms of paying for allotment or strict usage. However, the lack of full trust may suggest limiting the total number of image uploads to something like 10% of their compute quota (to allow for snapshots) and limiting staging usage to enough for one or two image imports at a time. Configuring Glance for Per-Tenant Quotas ---------------------------------------- #. Register quota limits (optional): .. include:: ../install/register-quotas.rst #. Tell Glance to use Keystone quotas: .. include:: ../install/configure-quotas.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/requirements.rst0000664000175000017500000000652300000000000021476 0ustar00zuulzuul00000000000000.. Copyright 2016-present OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Requirements ============ External Requirements Affecting Glance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Like other OpenStack projects, Glance uses some external libraries for a subset of its features. Some examples include the ``qemu-img`` utility used by the tasks feature, ``pydev`` to debug using popular IDEs, and ``python-xattr`` for the Image Cache when using the "xattr" driver. On the other hand, if ``dnspython`` is installed in the environment, Glance provides a workaround to make it work with IPv6. Additionally, some libraries like ``xattr`` are not compatible when using Glance on Windows (see :ref:`the documentation on config options affecting the Image Cache `).
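Since these libraries are not in the source tree requirements file, operators install them out-of-band. A minimal sketch (package names are assumptions for the example and vary by distribution):

.. code-block:: console

   # qemu-img, used by the tasks feature, typically comes from a
   # distribution package; on Debian/Ubuntu, for example:
   $ sudo apt install qemu-utils

   # dnspython is an ordinary Python package:
   $ pip install dnspython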
Guideline to include your requirement in the requirements.txt file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described above, we don't include all the possible requirements needed by Glance features in the source tree requirements file. So, when an operator decides to use an **advanced feature** in Glance, we ask them to check the documentation/guidelines for those features to set up the feature in a workable way. To reduce operator pain, the development team likes to work with different operators to figure out when a popular feature should have its dependencies included in the requirements file. However, there's a tradeoff in including more requirements in the source tree, as it becomes more painful for packagers. So, it is a bit of a haggle among different stakeholders, and a judicious decision is made by the project PTL or release liaison to determine the outcome. To simplify the identification of an **advanced feature** in Glance, we can think of it as something that is not used or deployed by most of the known upstream community members. To name a few features that have been identified as advanced: * glance tasks * image signing * image prefetcher * glance db purge utility * image locations Steps to include your requirement in the requirements.txt file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1. The first step is to propose a change against the ``openstack/requirements`` project to include the requirement(s) as a part of the ``global-requirements`` and ``upper-constraints`` files. 2. If your requirement is not a part of the project, you will have to propose a change adding that requirement to the requirements.txt file in Glance. Please include a ``Depends-On: <ChangeID>`` flag in the commit message, where the ``ChangeID`` is the gerrit ID of the corresponding change against the ``openstack/requirements`` project. 3. A sync bot then syncs the global requirements into project requirements on a regular basis, so any updates to the requirements are synchronized on a timely basis. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/rollingupgrades.rst0000664000175000017500000001270100000000000022147 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _rolling-upgrades: Rolling Upgrades ================ .. note:: The Rolling Upgrades feature is EXPERIMENTAL and its use in production systems is currently **not supported**. This statement remains true for the Queens release of Glance. What is the holdup, you ask? Before asserting that the feature is fully supported, the Glance team needs to have automated tests that perform rolling upgrades in the OpenStack Continuous Integration gates. The Glance project team has not had sufficient testing and development resources in recent cycles to prioritize this work. The Glance project team is committed to the stability of Glance. As part of OpenStack, we are committed to `The Four Opens`_.
If the ability to perform rolling upgrades in production systems is important to you, feel free to participate in the Glance community to help coordinate and drive such an effort. (We gently remind you that "participation" includes providing testing and development resources.) .. _`The Four Opens`: https://governance.openstack.org/tc/reference/opens.html Scope of this document ---------------------- This page describes one way to perform a rolling upgrade from Newton to Ocata for a particular configuration of Glance services. There may be other ways to perform a rolling upgrade from Newton to Ocata for other configurations of Glance services, but those are beyond the scope of this document. For the experimental rollout of rolling upgrades, we describe only the following simple case. Prerequisites ------------- * MySQL/MariaDB 5.5 or later * Glance running Images API v2 only * Glance not using the Glance Registry * Multiple Glance nodes * A load balancer or some other type of redirection device is being used in front of the Glance nodes in such a way that a node can be dropped out of rotation, that is, that Glance node continues running the Glance service but is no longer having requests routed to it Procedure --------- Following is the process to upgrade Glance with zero downtime: 1. Back up the Glance database. 2. Choose an arbitrary Glance node or provision a new node to install the new release. If an existing Glance node is chosen, gracefully stop the Glance services. In what follows, this node will be referred to as the NEW NODE. .. _Stop the Glance processes gracefully: .. note:: **Gracefully stopping services** Before stopping the Glance processes on a node, one may choose to wait until all the existing connections drain out. This could be achieved by taking the node out of rotation, that is, by ensuring that requests are no longer routed to that node. This way all the requests that are currently being processed will get a chance to finish processing. However, some Glance requests like uploading and downloading images may last a long time. This increases the wait time to drain out all connections and consequently the time to upgrade Glance completely. On the other hand, stopping the Glance services before the connections drain out will present the user with errors. While arguably this is not downtime given that Images API requests are continually being serviced by other nodes, this is nonetheless an unpleasant user experience for the user whose in-flight request has terminated in an error. Hence, an operator must be judicious when stopping the services. 3. Upgrade the NEW NODE with the new release and update the configuration accordingly. **DO NOT** start the Glance services on the NEW NODE at this time. 4. Using the NEW NODE, expand the database using the command:: glance-manage db expand .. warning:: For MySQL, using the ``glance-manage db expand`` command requires that you either grant your glance user ``SUPER`` privileges, or run ``set global log_bin_trust_function_creators=1;`` in mysql beforehand. 5. Then, also on the NEW NODE, perform the data migrations using the command:: glance-manage db migrate *The data migrations must be completed before you proceed to the next step.* 6. Start the Glance processes on the NEW NODE. It is now ready to receive traffic from the load balancer. 7. Taking one node at a time from the remaining nodes, for each node: a. `Stop the Glance processes gracefully`_ as described in Step 2, above.
*Do not proceed until the "old" Glance services on the node have been completely shut down.* b. Upgrade the node to the new release (and corresponding configuration). c. Start the updated Glance processes on the upgraded node. 8. After **ALL** of the nodes have been upgraded to run the new Glance services, and there are **NO** nodes running any old Glance services, contract the database by running the command from any one of the upgraded nodes:: glance-manage db contract ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/tasks.rst0000664000175000017500000001561500000000000020102 0ustar00zuulzuul00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _tasks: Tasks ===== Conceptual Overview ------------------- Image files can be quite large, and processing images (converting an image from one format to another, for example) can be extremely resource intensive. Additionally, a one-size-fits-all approach to processing images is not desirable. A public cloud will have quite different security concerns than, for example, a small private cloud run by an academic department in which all users know and trust each other. Thus a public cloud deployer may wish to run various validation checks on an image that a user wants to bring in to the cloud, whereas the departmental cloud deployer may view such processing as a waste of resources. To address this situation, Glance contains *tasks*. Tasks are intended to offer end users a front end to long running asynchronous operations -- the type of operation you kick off and don't expect to finish until you've gone to the coffee shop, had a pleasant chat with your barista, had a coffee, had a pleasant walk home, etc. The asynchronous nature of tasks is emphasized up front in order to set end user expectations with respect to how long the task may take (hint: longer than other Glance operations). Having a set of operations performed by tasks allows a deployer flexibility with respect to how many operations will be processed simultaneously, which in turn allows flexibility with respect to what kind of resources need to be set aside for task processing. Thus, although large cloud deployers are certainly interested in tasks for the alternative custom image processing workflow they enable, smaller deployers find them useful as a means of controlling resource utilization. An additional reason tasks have been introduced into Glance is to support Glance's role in the OpenStack ecosystem. Glance provides cataloging, storage, and delivery of virtual machine images. As such, it needs to be responsive to other OpenStack components. Nova, for instance, requests images from Glance in order to boot instances; it uploads images to Glance as part of its workflow for the Nova image-create action; and it uses Glance to provide the data for the image-related API calls that are defined in the Compute API that Nova instantiates. 
It is necessary to the proper functioning of an OpenStack cloud that these synchronous operations not be compromised by excess load caused by non-essential functionality such as image import. By separating the tasks resource from the images resource in the Images API, it's easier for deployers to allocate resources and route requests for tasks separately from the resources required to support Glance's service role. At the same time this separation avoids confusion for users of an OpenStack cloud. Responses to requests to ``/v2/images`` should return fairly quickly, while requests to ``/v2/tasks`` may take a while. In short, tasks provide a common API across OpenStack installations for users of an OpenStack cloud to request image-related operations, yet at the same time tasks are customizable for individual cloud providers. Conceptual Details ------------------ A Glance task is a request to perform an asynchronous image-related operation. The request results in the creation of a *task resource* that can be polled for information about the status of the operation. A specific type of resource distinct from the traditional Glance image resource is appropriate here for several reasons: * A dedicated task resource can be developed independently of the traditional Glance image resource, both with respect to structure and workflow. * There may be multiple tasks (for example, image export or image conversion) operating on an image simultaneously. * A dedicated task resource allows for the delivery to the end user of clear, detailed error messages specific to the particular operation. * A dedicated task resource respects the principle of least surprise. For example, an import task does not create an image in Glance until it's clear that the bits submitted pass the deployer's tests for an allowable image. Upon reaching a final state (``success`` or ``failure``), a task resource is assigned an expiration datetime that's displayed in the ``expires_at`` field. (The time between final state and expiration is configurable.) After that datetime, the task resource is subject to being deleted. The result of the task (for example, an imported image) will still exist. For details about the defined task statuses, please see :ref:`task-statuses`. Tasks expire eventually because there's no reason to keep them around, as the user will have the result of the task, which was the point of creating the task in the first place. The reason tasks aren't instantly deleted is that there may be information contained in the task resource that's not easily available elsewhere. (For example, a successful import task will eventually result in the creation of an image in Glance, and it would be useful to know the UUID of this image. Similarly, if the import task fails, we want to give the end user time to read the task resource to analyze the error message.) Task Entities ------------- A task entity is represented by a JSON-encoded data structure defined by the JSON schema available at ``/v2/schemas/task``. A task entity has an identifier (``id``) that is guaranteed to be unique within the endpoint to which it belongs. The id is used as a token in request URIs to interact with that specific task. In addition to the usual properties you'd expect (for example, ``created_at``, ``self``, ``type``, ``status``, ``updated_at``, etc.), tasks have these properties of interest: * ``input``: this is defined to be a JSON blob, the exact content of which will depend upon the requirements set by the specific cloud deployer.
The intent is that each deployer will document these requirements for end users. * ``result``: this is also defined to be a JSON blob, the content of which will be documented by each cloud deployer. The ``result`` element will be null until the task has reached a final state, and if the final status is ``failure``, the result element remains null. * ``message``: this string field is expected to be null unless the task has entered ``failure`` status. At that point, it contains an informative human-readable message concerning the reason(s) for the task failure. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/troubleshooting.rst0000664000175000017500000004004300000000000022175 0ustar00zuulzuul00000000000000==================== Images and instances ==================== Virtual machine images contain a virtual disk that holds a bootable operating system on it. Disk images provide templates for virtual machine file systems. The Image service controls image storage and management. Instances are the individual virtual machines that run on physical compute nodes inside the cloud. Users can launch any number of instances from the same image. Each launched instance runs from a copy of the base image. Any changes made to the instance do not affect the base image. Snapshots capture the state of an instance's running disk. Users can create a snapshot, and build a new image based on these snapshots. The Compute service controls instance, image, and snapshot storage and management. When you launch an instance, you must choose a ``flavor``, which represents a set of virtual resources. Flavors define the number of virtual CPUs, the amount of available RAM, and the size of ephemeral disks. Users must select from the set of available flavors defined on their cloud. OpenStack provides a number of predefined flavors that you can edit or add to. .. note:: - For more information about creating and troubleshooting images, see the `OpenStack Virtual Machine Image Guide `__. - For more information about image configuration options, see the `Image services <../configuration/index.html>`__ section of the OpenStack Configuration Reference. You can add and remove additional resources from running instances, such as persistent volume storage, or public IP addresses. The example used in this chapter is of a typical virtual system within an OpenStack cloud. It uses the ``cinder-volume`` service, which provides persistent block storage, instead of the ephemeral storage provided by the selected instance flavor. This diagram shows the system state prior to launching an instance. The image store has a number of predefined images, supported by the Image service. Inside the cloud, a compute node contains the available vCPU, memory, and local disk resources. Additionally, the ``cinder-volume`` service stores predefined volumes. | .. _Figure Base Image: **The base image state with no running instances** .. figure:: ../images/instance-life-1.png | Instance Launch ~~~~~~~~~~~~~~~ To launch an instance, select an image, flavor, and any optional attributes. The selected flavor provides a root volume, labeled ``vda`` in this diagram, and additional ephemeral storage, labeled ``vdb``. In this example, the ``cinder-volume`` store is mapped to the third virtual disk on this instance, ``vdc``. | .. _Figure Instance creation: **Instance creation from an image**
.. figure:: ../images/instance-life-2.png | The Image service copies the base image from the image store to the local disk. The local disk is the first disk that the instance accesses, which is the root volume labeled ``vda``. Smaller instances start faster because less data needs to be copied across the network. The new empty ephemeral disk is also created, labeled ``vdb``. This disk is deleted when you delete the instance. The compute node connects to the attached ``cinder-volume`` using iSCSI. The ``cinder-volume`` is mapped to the third disk, labeled ``vdc`` in this diagram. After the compute node provisions the vCPU and memory resources, the instance boots up from root volume ``vda``. The instance runs and changes data on the disks (highlighted in red on the diagram). If the volume store is located on a separate network, the ``my_block_storage_ip`` option specified in the storage node configuration file directs image traffic to the compute node. .. note:: Some details in this example scenario might be different in your environment. For example, you might use a different type of back-end storage, or different network protocols. One common variant is that the ephemeral storage used for volumes ``vda`` and ``vdb`` could be backed by network storage rather than a local disk. When you delete an instance, the state is reclaimed with the exception of the persistent volume. The ephemeral storage, whether encrypted or not, is purged. Memory and vCPU resources are released. The image remains unchanged throughout this process. | .. _End of state: **The end state of an image and volume after the instance exits** .. figure:: ../images/instance-life-3.png | Image properties and property protection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An image property is a key and value pair that the administrator or the image owner attaches to an OpenStack Image service image, as follows: - The administrator defines core properties, such as the image name. - The administrator and the image owner can define additional properties, such as licensing and billing information. The administrator can configure any property as protected, which limits which policies or user roles can perform CRUD operations on that property. Protected properties are generally additional properties to which only administrators have access. Further, Glance itself reserves properties namespaced with the ``os_glance`` prefix for its own use. For unprotected image properties, the administrator can manage core properties and the image owner can manage additional properties. **To configure property protection** To configure property protection, edit the ``policy.yaml`` file. This file can also be used to set policies for Image service actions. #. Define roles or policies in the ``policy.yaml`` file:
.. code-block:: json

   {
       "context_is_admin": "role:admin",
       "default": "",
       "add_image": "",
       "delete_image": "",
       "get_image": "",
       "get_images": "",
       "modify_image": "",
       "publicize_image": "role:admin",
       "copy_from": "",
       "download_image": "",
       "upload_image": "",
       "delete_image_location": "",
       "get_image_location": "",
       "set_image_location": "",
       "add_member": "",
       "delete_member": "",
       "get_member": "",
       "get_members": "",
       "modify_member": "",
       "manage_image_cache": "role:admin",
       "get_task": "",
       "get_tasks": "",
       "add_task": "",
       "modify_task": "",
       "deactivate": "",
       "reactivate": "",
       "get_metadef_namespace": "",
       "get_metadef_namespaces": "",
       "modify_metadef_namespace": "",
       "add_metadef_namespace": "",
       "delete_metadef_namespace": "",
       "get_metadef_object": "",
       "get_metadef_objects": "",
       "modify_metadef_object": "",
       "add_metadef_object": "",
       "delete_metadef_object": "",
       "list_metadef_resource_types": "",
       "get_metadef_resource_type": "",
       "add_metadef_resource_type_association": "",
       "remove_metadef_resource_type_association": "",
       "get_metadef_property": "",
       "get_metadef_properties": "",
       "modify_metadef_property": "",
       "add_metadef_property": "",
       "remove_metadef_property": "",
       "get_metadef_tag": "",
       "get_metadef_tags": "",
       "modify_metadef_tag": "",
       "add_metadef_tag": "",
       "add_metadef_tags": "",
       "delete_metadef_tag": "",
       "delete_metadef_tags": ""
   }

For each parameter, use ``"rule:restricted"`` to restrict access to all users or ``"role:admin"`` to limit access to administrator roles. For example:

.. code-block:: json

   {
       "download_image": "rule:restricted",
       "upload_image": "role:admin"
   }

#. Define which roles or policies can manage which properties in a property protections configuration file. For example:

.. code-block:: ini

   [x_none_read]
   create = context_is_admin
   read = !
   update = !
   delete = !

   [x_none_update]
   create = context_is_admin
   read = context_is_admin
   update = !
   delete = context_is_admin

   [x_none_delete]
   create = context_is_admin
   read = context_is_admin
   update = context_is_admin
   delete = !

- A value of ``@`` allows the corresponding operation for a property.
- A value of ``!`` disallows the corresponding operation for a property.

#. In the ``glance-api.conf`` file, define the location of a property protections configuration file.

.. code-block:: ini

   property_protection_file = {file_name}

This file contains the rules for property protections and the roles and policies associated with it. By default, property protections are not enforced. If you specify a file name value and the file is not found, the ``glance-api`` service does not start. To view a sample configuration file, see `glance-api.conf <../configuration/glance_api.html>`__.

#. Optionally, in the ``glance-api.conf`` file, specify whether roles or policies are used in the property protections configuration file.

.. code-block:: ini

   property_protection_rule_format = roles

The default is ``roles``. To view a sample configuration file, see `glance-api.conf <../configuration/glance_api.html>`__. Image download: how it works ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prior to starting a virtual machine, transfer the virtual machine image to the compute node from the Image service. How this works can change depending on the settings chosen for the compute node and the Image service. Typically, the Compute service will use the image identifier passed to it by the scheduler service and request the image from the Image API.
Though images are not stored in glance itself (rather in a back end, which could be Object Storage, a filesystem, or any other supported method), the connection is made from the compute node to the Image service and the image is transferred over this connection. The Image service streams the image from the back end to the compute node. It is possible to set up the Object Storage node on a separate network, and still allow image traffic to flow between the compute and object storage nodes. Configure the ``my_block_storage_ip`` option in the storage node configuration file to allow block storage traffic to reach the compute node. Certain back ends support a more direct method, where on request the Image service will return a URL that links directly to the back-end store. You can download the image using this approach. Currently, the only store to support the direct download approach is the filesystem store. Configure this approach using the ``filesystems`` option in the ``image_file_url`` section of the ``nova.conf`` file on compute nodes. Compute nodes also implement caching of images, meaning that if an image has been used before it won't necessarily be downloaded every time. Information on the configuration options for caching on compute nodes can be found in the `Configuration Reference <../configuration/>`__. Instance building blocks ~~~~~~~~~~~~~~~~~~~~~~~~ In OpenStack, the base operating system is usually copied from an image stored in the OpenStack Image service. This results in an ephemeral instance that starts from a known template state and loses all accumulated states on shutdown. You can also put an operating system on a persistent volume in Compute or the Block Storage volume system. This gives a more traditional, persistent system that accumulates states that are preserved across restarts. To get a list of available images on your system, run:

.. code-block:: console

   $ glance image-list
   +--------------------------------------+-----------------------------+
   | ID                                   | Name                        |
   +--------------------------------------+-----------------------------+
   | aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 |
   +--------------------------------------+-----------------------------+
   | 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 |
   +--------------------------------------+-----------------------------+
   | df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins                     |
   +--------------------------------------+-----------------------------+

The displayed image attributes are: ``ID`` Automatically generated or user-provided UUID of the image. ``Name`` Free-form, human-readable name for the image. Virtual hardware templates are called ``flavors``, and are defined by administrators. Prior to the Newton release, a default installation also included five predefined flavors. For a list of flavors that are available on your system, run:

.. code-block:: console

   $ openstack flavor list
   +-----+-----------+-------+------+-----------+-------+-----------+
   | ID  | Name      | RAM   | Disk | Ephemeral | VCPUs | Is_Public |
   +-----+-----------+-------+------+-----------+-------+-----------+
   | 1   | m1.tiny   | 512   | 1    | 0         | 1     | True      |
   | 2   | m1.small  | 2048  | 20   | 0         | 1     | True      |
   | 3   | m1.medium | 4096  | 40   | 0         | 2     | True      |
   | 4   | m1.large  | 8192  | 80   | 0         | 4     | True      |
   | 5   | m1.xlarge | 16384 | 160  | 0         | 8     | True      |
   +-----+-----------+-------+------+-----------+-------+-----------+

By default, administrative users can configure the flavors.
You can change this behavior by redefining the access controls for ``compute_extension:flavormanage`` in ``/etc/nova/policy.yaml`` on the ``compute-api`` server. Instance management tools ~~~~~~~~~~~~~~~~~~~~~~~~~ OpenStack provides command-line, web interface, and API-based instance management tools. Third-party management tools are also available, using either the native API or the provided EC2-compatible API. The OpenStack python-openstackclient package provides a basic command-line utility, which uses the :command:`openstack` command. This is available as a native package for most Linux distributions, or you can install the latest version using the pip Python package installer:

.. code-block:: console

   # pip install python-openstackclient

For more information about python-openstackclient and other command-line tools, see the `OpenStack End User Guide <../cli/index.html>`__. The latest image management tools can be installed using the pip package manager:

.. code-block:: console

   # pip install python-glanceclient

This package provides the :command:`glance` command-line tool for managing your images. Control where instances run ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The `Scheduling section `__ of the OpenStack Configuration Reference provides detailed information on controlling where your instances run, including ensuring a set of instances run on different compute nodes for service resiliency or on the same node for high performance inter-instance communications. Administrative users can specify which compute node their instances run on. To do this, specify the ``--availability-zone AVAILABILITY_ZONE:COMPUTE_HOST`` parameter. Launch instances with UEFI ~~~~~~~~~~~~~~~~~~~~~~~~~~ Unified Extensible Firmware Interface (UEFI) is a standard firmware designed to replace legacy BIOS. There is a slow but steady trend for operating systems to move to the UEFI format and, in some cases, make it their only format. **To configure the UEFI environment** To successfully launch an instance from a UEFI image in a QEMU/KVM environment, the administrator has to install the following packages on the compute node: - OVMF, a port of Intel's tianocore firmware to the QEMU virtual machine. - libvirt, which has been supporting UEFI boot since version 1.2.9. Because the default UEFI loader path is ``/usr/share/OVMF/OVMF_CODE.fd``, the administrator must create a link to this location after the UEFI package is installed. **To upload UEFI images** To launch instances from a UEFI image, the administrator first has to upload one UEFI image. To do so, the ``hw_firmware_type`` property must be set to ``uefi`` when the image is created. For example:

.. code-block:: console

   $ glance image-create-via-import --container-format bare \
     --disk-format qcow2 --property hw_firmware_type=uefi \
     --file /tmp/cloud-uefi.qcow --name uefi

After that, you can launch instances from this UEFI image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/useful-image-properties.rst0000664000175000017500000006420300000000000023527 0ustar00zuulzuul00000000000000======================= Useful image properties ======================= You can set image properties that can be consumed by other services to affect the behavior of those other services.
For example: * Image properties can be used to override specific behaviors defined for Nova flavors * Image properties can be used to affect the behavior of the Nova scheduler * Image properties can be used to affect the behavior of particular Nova hypervisors * Image properties can be used to provide additional information to Ironic (even when Nova is not used) Using image properties ---------------------- Some important points to keep in mind: * The ``glance-api.conf`` setting ``image_property_quota`` should be sufficiently high to allow any additional desired properties. (The default is 128.) * You can use Glance *property protections* to control access to specific image properties, should that be desirable. See the :ref:`property-protections` section of this Guide for more information. * Glance reserves properties namespaced with the ``os_glance`` prefix for its own use and will refuse attempts by API users to set or change them. * You can use a plugin to the interoperable image import process to set specific properties on non-admin images imported into Glance. See :ref:`iir_plugins` for more information. See the original spec, `Inject metadata properties automatically to non-admin images `_ for a discussion of the use case addressed by this plugin. * The Nova **ImagePropertiesFilter**, enabled by default in the Compute Service, consumes image properties to determine proper scheduling of builds to compute hosts. See the `Compute schedulers `_ section of the Nova Configuration Guide for more information. * Nova has a setting, ``non_inheritable_image_properties``, that allows you to specify which image properties from the image a virtual machine was booted from will *not* be propagated to a snapshot image of that virtual machine. See the `Configuration Options `_ section of the Nova Configuration Guide for more information. * Some properties recognized by Nova may have no effect unless a corresponding property is enabled in the server flavor. For example, the ``hw_rng_model`` image property has no effect unless the Nova flavor has been configured to have ``hw_rng:allowed`` set to True in the flavor's extra_specs. * In a mixed hypervisor environment, the Compute Service uses the ``hypervisor_type`` image property to match images to the correct hypervisor type. Depending upon what hypervisors are in use in your Nova installation, there may be other image properties that these hypervisors can consume to affect their behavior. Read through the configuration information for your hypervisors in the `Hypervisors `_ section of the Nova Configuration Guide for more information. In particular, the VMware hypervisor driver requires that particular image properties be set for optimal functioning. See the `VMware vSphere `_ section of the Nova Configuration Guide for more information. .. _image_property_keys_and_values: Image property keys and values ------------------------------ Here is a list of useful image properties and the values they expect. ``architecture`` :Type: str The CPU architecture that must be supported by the hypervisor. For example, ``x86_64``, ``arm``, or ``ppc64``. Run :command:`uname -m` to get the architecture of a machine. We strongly recommend using the architecture data vocabulary defined by the `libosinfo project `_ for this purpose. 
One of: * ``aarch64`` - `ARM 64-bit `_ * ``alpha`` - `DEC 64-bit RISC `_ * ``armv7l`` - `ARM Cortex-A7 MPCore `_ * ``cris`` - `Ethernet, Token Ring, AXis—Code Reduced Instruction Set `_ * ``i686`` - `Intel sixth-generation x86 (P6 micro architecture) `_ * ``ia64`` - `Itanium `_ * ``lm32`` - `Lattice Micro32 `_ * ``m68k`` - `Motorola 68000 `_ * ``microblaze`` - `Xilinx 32-bit FPGA (Big Endian) `_ * ``microblazeel`` - `Xilinx 32-bit FPGA (Little Endian) `_ * ``mips`` - `MIPS 32-bit RISC (Big Endian) `_ * ``mipsel`` - `MIPS 32-bit RISC (Little Endian) `_ * ``mips64`` - `MIPS 64-bit RISC (Big Endian) `_ * ``mips64el`` - `MIPS 64-bit RISC (Little Endian) `_ * ``openrisc`` - `OpenCores RISC `_ * ``parisc`` - `HP Precision Architecture RISC `_ * ``parisc64`` - `HP Precision Architecture 64-bit RISC `_ * ``ppc`` - `PowerPC 32-bit `_ * ``ppc64`` - `PowerPC 64-bit `_ * ``ppcemb`` - `PowerPC (Embedded 32-bit) `_ * ``s390`` - `IBM Enterprise Systems Architecture/390 `_ * ``s390x`` - `S/390 64-bit `_ * ``sh4`` - `SuperH SH-4 (Little Endian) `_ * ``sh4eb`` - `SuperH SH-4 (Big Endian) `_ * ``sparc`` - `Scalable Processor Architecture, 32-bit `_ * ``sparc64`` - `Scalable Processor Architecture, 64-bit `_ * ``unicore32`` - `Microprocessor Research and Development Center RISC Unicore32 `_ * ``x86_64`` - `64-bit extension of IA-32 `_ * ``xtensa`` - `Tensilica Xtensa configurable microprocessor core `_ * ``xtensaeb`` - `Tensilica Xtensa configurable microprocessor core `_ (Big Endian) ``hypervisor_type`` :Type: str The hypervisor type. Note that ``qemu`` is used for both QEMU and KVM hypervisor types. One of: - ``hyperv`` - ``ironic`` - ``lxc`` - ``qemu`` - ``uml`` - ``vmware`` - ``xen`` ``instance_uuid`` :Type: str For snapshot images, this is the UUID of the server used to create this image. The value must be a valid server UUID. ``img_config_drive`` :Type: str Specifies whether the image needs a config drive. One of: - ``mandatory`` - ``optional`` (default if property is not used) ``img_type`` :Type: str Specifies the partitioning type of the image. The default value is ``partition`` if the ``kernel_id``/``ramdisk_id`` properties are present, otherwise ``whole-disk``. One of: - ``whole-disk`` - an image with a partition table embedded. - ``partition`` - an image with only the root partition without a partition table. .. note:: This property is currently only recognized by Ironic. ``kernel_id`` :Type: str The ID of an image stored in the Image service that should be used as the kernel when booting an AMI-style image. The value must be a valid image ID. ``os_admin_user`` :Type: str The name of the user with admin privileges. The value must be a valid username (defaults to ``root`` for Linux guests and ``Administrator`` for Windows guests). ``os_distro`` :Type: str The common name of the operating system distribution in lowercase (uses the same data vocabulary as the `libosinfo project`_). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value. One of: * ``arch`` - Arch Linux. Do not use ``archlinux`` or ``org.archlinux``. * ``centos`` - Community Enterprise Operating System. Do not use ``org.centos`` or ``CentOS``. * ``debian`` - Debian. Do not use ``Debian`` or ``org.debian``. * ``fedora`` - Fedora. Do not use ``Fedora``, ``org.fedora``, or ``org.fedoraproject``. * ``freebsd`` - FreeBSD. Do not use ``org.freebsd``, ``freeBSD``, or ``FreeBSD``. * ``gentoo`` - Gentoo Linux. Do not use ``Gentoo`` or ``org.gentoo``.
* ``mandrake`` - Mandrakelinux (MandrakeSoft) distribution. Do not use ``mandrakelinux`` or ``MandrakeLinux``. * ``mandriva`` - Mandriva Linux. Do not use ``mandrivalinux``. * ``mes`` - Mandriva Enterprise Server. Do not use ``mandrivaent`` or ``mandrivaES``. * ``msdos`` - Microsoft Disc Operating System. Do not use ``ms-dos``. * ``netbsd`` - NetBSD. Do not use ``NetBSD`` or ``org.netbsd``. * ``netware`` - Novell NetWare. Do not use ``novell`` or ``NetWare``. * ``openbsd`` - OpenBSD. Do not use ``OpenBSD`` or ``org.openbsd``. * ``opensolaris`` - OpenSolaris. Do not use ``OpenSolaris`` or ``org.opensolaris``. * ``opensuse`` - openSUSE. Do not use ``suse``, ``SuSE``, or ``org.opensuse``. * ``rocky`` - Rocky Linux. Do not use ``Rocky`` or ``rockylinux``. * ``rhel`` - Red Hat Enterprise Linux. Do not use ``redhat``, ``RedHat``, or ``com.redhat``. * ``sled`` - SUSE Linux Enterprise Desktop. Do not use ``com.suse``. * ``ubuntu`` - Ubuntu. Do not use ``Ubuntu``, ``com.ubuntu``, ``org.ubuntu``, or ``canonical``. * ``windows`` - Microsoft Windows. Do not use ``com.microsoft.server`` or ``windoze``. ``os_version`` :Type: str The operating system version as specified by the distributor. The value must be a valid version number (for example, ``11.10``). ``os_secure_boot`` :Type: str Secure Boot is a security standard. When the instance starts, Secure Boot first examines software such as the firmware and OS by their signatures and only allows them to run if the signatures are valid. For Hyper-V: Images must be prepared as Generation 2 VMs. The instance must also contain the ``hw_machine_type=hyperv-gen2`` image property. Linux guests will also require the bootloader's digital signature, provided as the ``os_secure_boot_signature`` and ``hypervisor_version_requires='>=10.0'`` image properties. One of: * ``required`` - Enable the Secure Boot feature. * ``disabled`` or ``optional`` - (default if property not used) Disable the Secure Boot feature. ``os_shutdown_timeout`` :Type: int By default, guests will be given 60 seconds to perform a graceful shutdown. After that, the VM is powered off. This property allows overriding the amount of time (unit: seconds) to allow a guest OS to cleanly shut down before power off. A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up. ``ramdisk_id`` :Type: str The ID of an image stored in the Image service that should be used as the ramdisk when booting an AMI-style image. The value must be a valid image ID. ``rootfs_uuid`` :Type: str For whole-disk images (see ``img_type`` above), the UUID of the root partition. This property is used by Ironic when configuring software RAID. ``trait:<trait_name>`` :Type: str Added in the Rocky release. Functionality is similar to traits specified in `flavor extra specs `_. Traits allow you to specify that a server be built on a compute node that has the set of traits specified in the image. The traits are associated with the resource provider that represents the compute node in the Placement API. The syntax of specifying traits is **trait:<trait_name>=value**, for example: * ``trait:HW_CPU_X86_AVX2=required`` * ``trait:STORAGE_DISK_SSD=required`` The nova scheduler will pass required traits specified on the image to the Placement API to include only resource providers that can satisfy the required traits. Traits for the resource providers can be managed using the `osc-placement plugin `_. Image traits are used by the nova scheduler even in cases of volume-backed instances, if the volume source is an image with traits. The only valid value is ``required``.
Any other value is invalid. One of: * ``required`` - the trait is required on the resource provider that represents the compute node on which the image is launched. ``vm_mode`` :Type: str The virtual machine mode. This represents the host/guest ABI (application binary interface) used for the virtual machine. One of: * ``hvm`` - Fully virtualized. This is the mode used by QEMU and KVM. * ``xen`` - Xen 3.0 paravirtualized. * ``uml`` - User Mode Linux paravirtualized. * ``exe`` - Executables in containers. This is the mode used by LXC. ``hw_cpu_sockets`` :Type: int The preferred number of sockets to expose to the guest. Only supported by the libvirt driver. ``hw_cpu_cores`` :Type: int The preferred number of cores to expose to the guest. Only supported by the libvirt driver. ``hw_cpu_threads`` :Type: int The preferred number of threads to expose to the guest. Only supported by the libvirt driver. ``hw_cpu_policy`` :Type: str Used to pin the virtual CPUs (vCPUs) of instances to the host’s physical CPU cores (pCPUs). Host aggregates should be used to separate these pinned instances from unpinned instances as the latter will not respect the resourcing requirements of the former. Only supported by the libvirt driver. One of: * ``shared`` - (default if property not specified) The guest vCPUs will be allowed to freely float across host pCPUs, albeit potentially constrained by NUMA policy. * ``dedicated`` - The guest vCPUs will be strictly pinned to a set of host pCPUs. In the absence of an explicit vCPU topology request, the drivers typically expose all vCPUs as sockets with one core and one thread. When strict CPU pinning is in effect the guest CPU topology will be set up to match the topology of the CPUs to which it is pinned. This option implies an overcommit ratio of 1.0. For example, if a two vCPU guest is pinned to a single host core with two threads, then the guest will get a topology of one socket, one core, two threads. ``hw_cpu_thread_policy`` :Type: str Further refine ``hw_cpu_policy=dedicated`` by stating how hardware CPU threads in a simultaneous multithreading-based (SMT) architecture should be used. SMT-based architectures include Intel processors with Hyper-Threading technology. In these architectures, processor cores share a number of components with one or more other cores. Cores in such architectures are commonly referred to as hardware threads, while the cores that a given core shares components with are known as thread siblings. Only supported by the libvirt driver. One of: * ``prefer`` - (default if property not specified) The host may or may not have an SMT architecture. Where an SMT architecture is present, thread siblings are preferred. * ``isolate`` - The host must not have an SMT architecture or must emulate a non-SMT architecture. If the host does not have an SMT architecture, each vCPU is placed on a different core as expected. If the host does have an SMT architecture - that is, one or more cores have thread siblings - then each vCPU is placed on a different physical core. No vCPUs from other guests are placed on the same core. All but one thread sibling on each utilized core is therefore guaranteed to be unusable. * ``require`` - The host must have an SMT architecture. Each vCPU is allocated on thread siblings. If the host does not have an SMT architecture, then it is not used. If the host has an SMT architecture, but not enough cores with free thread siblings are available, then scheduling fails.
``hw_cdrom_bus`` :Type: str Specifies the type of disk controller to attach CD-ROM devices to. As for ``hw_disk_bus``. Only supported by the libvirt driver. ``hw_disk_bus`` :Type: str Specifies the type of disk controller to attach disk devices to. Only supported by the libvirt driver. Options depend on the value of `nova's virt_type config option `_: * For ``qemu`` and ``kvm``: one of ``scsi``, ``virtio``, ``uml``, ``xen``, ``ide``, ``usb``, or ``lxc``. * For ``xen``: one of ``xen`` or ``ide``. * For ``uml``: must be ``uml``. * For ``lxc``: must be ``lxc``. * For ``parallels``: one of ``ide`` or ``scsi``. ``hw_firmware_type`` :Type: str Specifies the type of firmware with which to boot the guest. Only supported by the libvirt driver. One of: * ``bios`` * ``uefi`` ``hw_firmware_stateless`` :Type: bool Specifies whether the image should be booted with stateless firmware. If true, firmware configurations do not persist over server reboot. Only supported by the libvirt driver. UEFI firmware is also required. ``hw_mem_encryption`` :Type: bool Enables encryption of guest memory at the hardware level, if there are compute hosts available which support this. See `nova's documentation on configuration of the KVM hypervisor `_ for more details. Only supported by the libvirt driver. ``hw_virtio_packed_ring`` :Type: bool Enables the Packed VIRT-IO Queue feature. When set to true, the instance will be scheduled to hosts that support negotiating the packed virt queue format. This feature may or may not be enabled depending on the guest driver. When used, it will improve the small-packet performance of network I/O. Only supported by the libvirt driver. ``hw_pointer_model`` :Type: str Input devices that allow interaction with a graphical framebuffer, for example to provide a graphic tablet for absolute cursor movement. Currently this is only supported by the KVM/QEMU hypervisor configuration, and VNC or SPICE consoles must be enabled. Only supported by the libvirt driver. One of: - ``usbtablet`` ``hw_rng_model`` :Type: str Adds a random-number generator device to the image's instances. This image property by itself does not guarantee that a hardware RNG will be used; it expresses a preference that may or may not be satisfied depending upon Nova configuration. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: * The generator device is disabled. * ``/dev/urandom`` is used as the default entropy source. To specify a physical hardware RNG device, use the following option in the ``nova.conf`` file: .. code-block:: ini rng_dev_path=/dev/hwrng * The use of a hardware random number generator must be configured in a flavor's extra_specs by setting ``hw_rng:allowed`` to True in the flavor definition. Only supported by the libvirt driver. One of: - ``virtio`` - Other supported device. ``hw_time_hpet`` :Type: bool Adds support for the High Precision Event Timer (HPET) for x86 guests in the libvirt driver when ``hypervisor_type=qemu`` and ``architecture=i686`` or ``architecture=x86_64``. The timer can be enabled by setting ``hw_time_hpet=true``. By default HPET remains disabled. Only supported by the libvirt driver. ``hw_machine_type`` :Type: str For libvirt: Enables booting an ARM system using the specified machine type. If an ARM image is used and its machine type is not explicitly specified, then Compute uses the ``virt`` machine type as the default for ARMv7 and AArch64. For Hyper-V: Specifies whether the Hyper-V instance will be a generation 1 or generation 2 VM.
By default, if the property is not provided, the instances will be generation 1 VMs. If the image is specific to generation 2 VMs but the property is not provided accordingly, the instance will fail to boot. For libvirt: Valid types can be viewed by using the :command:`virsh capabilities` command (machine types are displayed in the ``machine`` tag). For Hyper-V: Acceptable values are either ``hyperv-gen1`` or ``hyperv-gen2``. Only supported by the libvirt and Hyper-V drivers. ``os_type`` :Type: str The operating system installed on the image. The ``libvirt`` API driver contains logic that takes different actions depending on the value of the ``os_type`` parameter of the image. For example, for ``os_type=windows`` images, it creates a FAT32-based swap partition instead of a Linux swap partition, and it limits the injected host name to less than 16 characters. Only supported by the libvirt driver. One of: * ``linux`` * ``windows`` ``hw_scsi_model`` :Type: str Enables the use of VirtIO SCSI (``virtio-scsi``) to provide block device access for compute instances; by default, instances use VirtIO Block (``virtio-blk``). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware. Only supported by the libvirt driver. One of: * ``virtio-scsi`` ``hw_serial_port_count`` :Type: int Specifies the count of serial ports that should be provided. If ``hw:serial_port_count`` is not set in the flavor's extra_specs, then any count is permitted. If ``hw:serial_port_count`` is set, then this provides the default serial port count. It is permitted to override the default serial port count, but only with a lower value. Only supported by the libvirt driver. ``hw_video_model`` :Type: str The graphic device model presented to the guest. ``none`` disables the graphics device in the guest and should generally be used when using GPU passthrough. One of: * ``vga`` * ``cirrus`` * ``vmvga`` * ``xen`` * ``qxl`` * ``virtio`` * ``gop`` * ``none`` * ``bochs`` Only supported by the libvirt driver. ``hw_video_ram`` :Type: int Maximum RAM in MB for the video image. Used only if a ``hw_video:ram_max_mb`` value has been set in the flavor's extra_specs and that value is higher than the value set in ``hw_video_ram``. Only supported by the libvirt driver. ``hw_watchdog_action`` :Type: str Enables a virtual hardware watchdog device that carries out the specified action if the server hangs. The watchdog uses the ``i6300esb`` device (emulating a PCI Intel 6300ESB). If ``hw_watchdog_action`` is not specified, the watchdog is disabled. Only supported by the libvirt driver. One of: * ``disabled`` - (default) The device is not attached. Allows the user to disable the watchdog for the image, even if it has been enabled using the image's flavor. * ``reset`` - Forcefully reset the guest. * ``poweroff`` - Forcefully power off the guest. * ``pause`` - Pause the guest. * ``none`` - Only enable the watchdog; do nothing if the server hangs. ``os_command_line`` :Type: str The kernel command line to be used by the ``libvirt`` driver, instead of the default. For Linux Containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ``ramdisk``, or machine images (``aki``, ``ari``, or ``ami``). Only supported by the libvirt driver. ``hw_vif_model`` :Type: str Specifies the model of virtual network interface device to use. Only supported by the libvirt driver and VMware API drivers.
The valid options depend on the configured hypervisor. * ``KVM`` and ``QEMU``: ``e1000``, ``e1000e``, ``ne2k_pci``, ``pcnet``, ``rtl8139``, ``virtio`` and ``vmxnet3``. * VMware: ``e1000``, ``e1000e``, ``VirtualE1000``, ``VirtualE1000e``, ``VirtualPCNet32``, ``VirtualVmxnet`` and ``VirtualVmxnet3``. * Xen: ``e1000``, ``netfront``, ``ne2k_pci``, ``pcnet``, and ``rtl8139``. ``hw_vif_multiqueue_enabled`` :Type: bool If ``true``, this enables the ``virtio-net multiqueue`` feature. In this case, the driver sets the number of queues equal to the number of guest vCPUs. This makes the network performance scale across a number of vCPUs. Only supported by the libvirt driver. ``hw_boot_menu`` :Type: bool If ``true``, enables the BIOS bootmenu. In cases where both the image metadata and Extra Spec are set, the Extra Spec setting is used. This allows for flexibility in setting/overriding the default behavior as needed. Only supported by the libvirt driver. ``hw_pmu`` :Type: bool Controls emulation of a virtual performance monitoring unit (vPMU) in the guest. To reduce latency in realtime workloads disable the vPMU by setting ``hw_pmu=false``. Only supported by the libvirt driver. ``img_hide_hypervisor_id`` :Type: bool Some hypervisors add a signature to their guests. While the presence of the signature can enable some paravirtualization features on the guest, it can also have the effect of preventing some drivers from loading. Hiding the signature by setting this property to ``true`` may allow such drivers to load and work. Only supported by the libvirt driver. ``vmware_adaptertype`` :Type: str The virtual SCSI or IDE controller used by the hypervisor. Only supported by the VMWare API driver. One of: * ``lsiLogic`` * ``lsiLogicsas`` * ``busLogic`` * ``ide`` * ``paraVirtual`` ``vmware_ostype`` A VMware GuestID which describes the operating system installed in the image. This value is passed to the hypervisor when creating a virtual machine. If not specified, the key defaults to ``otherGuest``. See `thinkvirt.com `_ for supported values. Only supported by the VMWare API driver. ``vmware_image_version`` :Type: int Currently unused. ``instance_type_rxtx_factor`` :Type: float Deprecated and currently unused. ``auto_disk_config`` :Type: bool Deprecated and currently unused. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/admin/zero-downtime-db-upgrade.rst0000664000175000017500000001766000000000000023572 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _zero-downtime: Zero-Downtime Database Upgrades =============================== .. warning:: This feature is EXPERIMENTAL in the Ocata, Pike and Queens releases. We encourage operators to try it out, but its use in production environments is currently NOT SUPPORTED. A *zero-downtime database upgrade* enables true rolling upgrades of the Glance nodes in your cloud's control plane. 
At the appropriate point in the upgrade, you can have a mixed deployment of release *n* (for example, Ocata) and release *n-1* (for example, Newton) Glance nodes, take the *n-1* release nodes out of rotation, allow them to drain, and then take them out of service permanently, leaving all Glance nodes in your cloud at release *n*. That's a rough sketch of how a rolling upgrade would work. For full details, see :ref:`rolling-upgrades`. .. note:: When we speak of a "database upgrade", we are simply talking about changing the database schema and its data from the version used in OpenStack release *n* (say, Pike) to the version used in OpenStack release *n+1* (say, Queens). We are **not** talking about upgrading the database management software. .. note:: Downgrading a database is not supported. See :ref:`downgrades` for more information. The Expand-Migrate-Contract Cycle --------------------------------- It's possible to characterize three phases of a database upgrade: 1. **Expand**: in this phase, new columns, tables, and indexes are added to the database. 2. **Migrate**: in this phase, data is migrated to the new columns or tables. 3. **Contract**: in this phase, the "old" tables or columns (which are no longer in use) are removed from the database. The "legacy" Glance database migrations performed these phases as part of a single monolithic upgrade script. Currently, the Glance project creates a separate script for each of the three parts of the cycle. We call such an upgrade an **E-M-C** database migration. Zero-Downtime Database Upgrade ------------------------------ The E-M-C strategy can be performed offline when Glance is not using the database. With some adjustments, however, the E-M-C strategy can be applied online when the database is in use, making true rolling upgrades possible. .. note:: Don't forget that zero-downtime database upgrades are currently considered experimental and their use in production environments is NOT SUPPORTED. A zero-downtime database upgrade takes place as part of a :ref:`rolling upgrade strategy ` for upgrading your entire Glance installation. In such a situation, you want to upgrade to release *n* of Glance (say, Queens) while your release *n-1* API nodes are still running Pike. To make this possible, in the **Expand** phase, database triggers can be added to the database to keep the data in "old" and "new" columns synchronized. Likewise, after all data has been migrated and all Glance nodes have been updated to release *n* code, these triggers are deleted in the **Contract** phase. .. note:: Unlike the E-M-C scripts, database triggers are particular to each database technology. That's why the Glance project currently provides experimental support only for MySQL. New Database Version Identifiers -------------------------------- In order to perform zero-downtime upgrades, the version identifier of a database becomes more complicated, since it must reflect knowledge of what point in the E-M-C cycle the upgrade has reached. To make this evident, the identifier explicitly contains 'expand' or 'contract' as part of its name. Thus the Ocata cycle migration has two identifiers associated with it: ``ocata_expand01`` and ``ocata_contract01``. During the upgrade process, the database is initially marked with ``ocata_expand01``. Eventually, after completing the full upgrade process, the database will be marked with ``ocata_contract01``. So, instead of one database version, an operator will see a composite database version that will have both expand and contract versions.
A database will be considered at Ocata version only when both expand and contract revisions are at the latest revisions. For a successful Ocata zero-downtime upgrade, for example, the database will be marked with both ``ocata_expand01`` and ``ocata_contract01``. In the case in which there are multiple changes in a cycle, the database version record would go through the following progression:

+-------+--------------------------------------+-------------------------+
| stage | database identifier                  | comment                 |
+=======+======================================+=========================+
| E     | ``bexar_expand01``                   | upgrade begins          |
+-------+--------------------------------------+-------------------------+
| E     | ``bexar_expand02``                   |                         |
+-------+--------------------------------------+-------------------------+
| E     | ``bexar_expand03``                   |                         |
+-------+--------------------------------------+-------------------------+
| M     | ``bexar_expand03``                   | bexar_migrate01 occurs  |
+-------+--------------------------------------+-------------------------+
| M     | ``bexar_expand03``                   | bexar_migrate02 occurs  |
+-------+--------------------------------------+-------------------------+
| M     | ``bexar_expand03``                   | bexar_migrate03 occurs  |
+-------+--------------------------------------+-------------------------+
| C     | ``bexar_expand03, bexar_contract01`` |                         |
+-------+--------------------------------------+-------------------------+
| C     | ``bexar_expand03, bexar_contract02`` |                         |
+-------+--------------------------------------+-------------------------+
| C     | ``bexar_expand03, bexar_contract03`` | upgrade completed       |
+-------+--------------------------------------+-------------------------+

Database Upgrade ---------------- For offline database upgrades, the ``glance-manage`` tool still has the ``glance-manage db sync`` command. This command will execute the expand, migrate, and contract scripts for you, just as if they were contained in a single script. In order to enable zero-downtime database upgrades, the ``glance-manage`` tool has been augmented to include the following operations so that you can explicitly manage the upgrade. .. warning:: For MySQL, using the ``glance-manage db expand`` or ``glance-manage db contract`` command requires that you either grant your glance user ``SUPER`` privileges, or run ``set global log_bin_trust_function_creators=1;`` in mysql beforehand. Expanding the Database ~~~~~~~~~~~~~~~~~~~~~~ :: glance-manage db expand This will run the expansion phase of a rolling upgrade process. Database expansion should be run as the first step in the rolling upgrade process before any new services are started. Migrating the Data ~~~~~~~~~~~~~~~~~~ :: glance-manage db migrate This will run the data migrate phase of a rolling upgrade process. Database migration should be run after database expansion but before any new services are started. Contracting the Database ~~~~~~~~~~~~~~~~~~~~~~~~ :: glance-manage db contract This will run the contraction phase of a rolling upgrade process. Database contraction should be run as the last step of the rolling upgrade process after all old services are upgraded to new ones.
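Taken together, the zero-downtime workflow amounts to running these commands at the right points in the rolling upgrade. The following is only a sketch of the ordering described above; see :ref:`rolling-upgrades` for the full procedure::

    # Step 1: while all nodes are still running release n-1 code
    glance-manage db expand
    glance-manage db migrate

    # Step 2: upgrade the Glance nodes to release n, take the
    # release n-1 nodes out of rotation, and allow them to drain

    # Step 3: only after all old services have been retired
    glance-manage db contract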
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8182983 glance-29.0.0/doc/source/cli/0000775000175000017500000000000000000000000015672 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/footer.txt0000664000175000017500000000032400000000000017730 0ustar00zuulzuul00000000000000 SEE ALSO ======== * `OpenStack Glance `__ BUGS ==== * Glance bugs are tracked in Launchpad so you can view current bugs at `OpenStack Glance `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/general_options.txt0000664000175000017500000000436700000000000021625 0ustar00zuulzuul00000000000000 ``-h, --help`` Show the help message and exit ``--version`` Print the version number and exit ``-v, --verbose`` Print more verbose output ``--noverbose`` Disable verbose output ``-d, --debug`` Print debugging output (set logging level to DEBUG instead of default WARNING level) ``--nodebug`` Disable debugging output ``--use-syslog`` Use syslog for logging ``--nouse-syslog`` Disable the use of syslog for logging ``--syslog-log-facility SYSLOG_LOG_FACILITY`` syslog facility to receive log lines ``--config-dir DIR`` Path to a config directory to pull \*.conf files from. This file set is sorted, to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file arguments, hence over-ridden options in the directory take precedence. This means that configuration from files in a specified config-dir will always take precedence over configuration from files specified by --config-file, regardless of argument order. ``--config-file PATH`` Path to a config file to use. Multiple config files can be specified by using this flag multiple times, for example, ``--config-file <file1> --config-file <file2>``. Values in the latter files take precedence. ``--log-config-append PATH`` ``--log-config PATH`` The name of the logging configuration file. It does not disable existing loggers, but just appends the specified logging configuration to any other existing logging options. Please see the Python logging module documentation for details on logging configuration files. The log-config name for this option is deprecated. ``--log-format FORMAT`` A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. Default: None ``--log-date-format DATE_FORMAT`` Format string for %(asctime)s in log records. Default: None ``--log-file PATH, --logfile PATH`` (Optional) Name of log file to output to. If not set, logging will go to stdout. ``--log-dir LOG_DIR, --logdir LOG_DIR`` (Optional) The directory to keep log files in (will be prepended to --log-file) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glanceapi.rst0000664000175000017500000000076400000000000020356 0ustar00zuulzuul00000000000000========== glance-api ========== --------------------------------------- Server for the Glance Image Service API --------------------------------------- .. include:: header.txt SYNOPSIS ======== :: glance-api [options] DESCRIPTION =========== glance-api is a server daemon that serves the Glance API. OPTIONS ======= **General options**
.. include:: general_options.txt FILES ===== **/etc/glance/glance-api.conf** Default configuration file for Glance API .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancecachecleaner.rst0000664000175000017500000000174000000000000022175 0ustar00zuulzuul00000000000000==================== glance-cache-cleaner ==================== ---------------------------------------------------------------- Glance Image Cache Invalid Cache Entry and Stalled Image cleaner ---------------------------------------------------------------- .. include:: header.txt SYNOPSIS ======== :: glance-cache-cleaner [options] DESCRIPTION =========== This is meant to be run as a periodic task from cron. If something goes wrong while we're caching an image (for example the fetch times out, or an exception is raised), we create an 'invalid' entry. These entries are left around for debugging purposes. However, after some period of time, we want to clean these up. Also, if an incomplete image hangs around past the image_cache_stall_time period, we automatically sweep it up. OPTIONS ======= **General options** .. include:: general_options.txt FILES ===== **/etc/glance/glance-cache.conf** Default configuration file for the Glance Cache .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancecachemanage.rst0000664000175000017500000000334700000000000022021 0ustar00zuulzuul00000000000000=================== glance-cache-manage =================== ------------------------ Cache management utility ------------------------ .. include:: header.txt SYNOPSIS ======== :: glance-cache-manage [options] [args] COMMANDS ======== ``help <command>`` Output help for one of the commands below ``list-cached`` List all images currently cached ``list-queued`` List all images currently queued for caching ``queue-image`` Queue an image for caching ``delete-cached-image`` Purges an image from the cache ``delete-all-cached-images`` Removes all images from the cache ``delete-queued-image`` Deletes an image from the cache queue ``delete-all-queued-images`` Deletes all images from the cache queue OPTIONS ======= ``--version`` show program's version number and exit ``-h, --help`` show this help message and exit ``-v, --verbose`` Print more verbose output ``-d, --debug`` Print debugging output ``-H ADDRESS, --host=ADDRESS`` Address of Glance API host. Default: 0.0.0.0 ``-p PORT, --port=PORT`` Port the Glance API host listens on. Default: 9292 ``-k, --insecure`` Explicitly allow glance to perform "insecure" SSL (https) requests. The server's certificate will not be verified against any certificate authorities. This option should be used with caution. ``-A TOKEN, --auth_token=TOKEN`` Authentication token to use to identify the client to the glance server ``-f, --force`` Prevent select actions from requesting user confirmation ``-S STRATEGY, --os-auth-strategy=STRATEGY`` Authentication strategy (keystone or noauth) .. include:: openstack_options.txt
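A typical interaction, sketched with placeholders for the host, token, and image ID::

    $ glance-cache-manage --host=<API_HOST> --auth_token=<TOKEN> queue-image <IMAGE_ID>
    $ glance-cache-manage --host=<API_HOST> --auth_token=<TOKEN> list-queued

The first command queues the image for caching on the given Glance API host; the second confirms that it is waiting in the queue.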
.. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancecacheprefetcher.rst0000664000175000017500000000105600000000000022713 0ustar00zuulzuul00000000000000======================= glance-cache-prefetcher ======================= ------------------------------ Glance Image Cache Pre-fetcher ------------------------------ .. include:: header.txt SYNOPSIS ======== :: glance-cache-prefetcher [options] DESCRIPTION =========== This is meant to be run from the command line after queueing images to be prefetched. OPTIONS ======= **General options** .. include:: general_options.txt FILES ===== **/etc/glance/glance-cache.conf** Default configuration file for the Glance Cache .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancecachepruner.rst0000664000175000017500000000115300000000000022075 0ustar00zuulzuul00000000000000=================== glance-cache-pruner =================== ------------------- Glance cache pruner ------------------- .. include:: header.txt SYNOPSIS ======== :: glance-cache-pruner [options] DESCRIPTION =========== Prunes images from the Glance cache when the space exceeds the value set in the image_cache_max_size configuration option. This is meant to be run as a periodic task, perhaps every half-hour. OPTIONS ======= **General options** .. include:: general_options.txt FILES ===== **/etc/glance/glance-cache.conf** Default configuration file for the Glance Cache .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancecontrol.rst0000664000175000017500000000217200000000000021260 0ustar00zuulzuul00000000000000============== glance-control ============== -------------------------------------- Glance daemon start/stop/reload helper -------------------------------------- .. include:: header.txt SYNOPSIS ======== :: glance-control [options] <server> <command> [<config-file>] Where ``<server>`` is one of: ``all``, ``api``, ``glance-api``, ``registry``, ``glance-registry``, ``scrubber``, ``glance-scrubber`` And ``<command>`` is one of: ``start``, ``status``, ``stop``, ``shutdown``, ``restart``, ``reload``, ``force-reload`` And ``<config-file>`` is the optional configuration file to use. OPTIONS ======= **General Options** .. include:: general_options.txt ``--pid-file=PATH`` File to use as pid file. Default: /var/run/glance/$server.pid ``--await-child DELAY`` Period to wait for service death in order to report exit code (default is to not wait at all) ``--capture-output`` Capture stdout/err in syslog instead of discarding ``--nocapture-output`` The inverse of --capture-output ``--norespawn`` The inverse of --respawn ``--respawn`` Restart service on unexpected death .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancemanage.rst0000664000175000017500000000757100000000000021030 0ustar00zuulzuul00000000000000============= glance-manage ============= ------------------------- Glance Management Utility ------------------------- .. include:: header.txt SYNOPSIS ======== :: glance-manage [options] DESCRIPTION =========== glance-manage is a utility for managing and configuring a Glance installation. One important use of glance-manage is to set up the database.
To do this run:: glance-manage db_sync Note: glance-manage commands can be run either like this:: glance-manage db sync or with the db commands concatenated, like this:: glance-manage db_sync COMMANDS ======== ``db`` This is the prefix for the commands below when used with a space rather than a _. For example "db version". ``db_version`` This will print the current migration level of a glance database. ``db_upgrade [VERSION]`` This will take an existing database and upgrade it to the specified VERSION. ``db_version_control`` Place the database under migration control. ``db_sync [VERSION]`` Place an existing database under migration control and upgrade it to the specified VERSION. ``db_expand`` Run this command to expand the database as the first step of a rolling upgrade process. ``db_migrate`` Run this command to migrate the database as the second step of a rolling upgrade process. ``db_contract`` Run this command to contract the database as the last step of a rolling upgrade process. ``db_export_metadefs [PATH | PREFIX]`` Export the metadata definitions into json format. By default the definitions are exported to the /etc/glance/metadefs directory. ``Note: this command will overwrite existing files in the supplied or default path.`` ``db_load_metadefs [PATH]`` Load the metadata definitions into the glance database. By default the definitions are imported from the /etc/glance/metadefs directory. ``db_unload_metadefs`` Unload the metadata definitions. Clears the contents of all the glance db tables including metadef_namespace_resource_types, metadef_tags, metadef_objects, metadef_resource_types, metadef_namespaces and metadef_properties. ``db_purge`` Purge deleted rows older than a given age from glance db tables. This command interprets the following options when it is invoked: --age_in_days Purge deleted rows older than this age in days (default value if not specified: 30) --max_rows Limit the number of records to delete (default value if not specified: 100) WARNING: This function is useful primarily in test systems. We do not recommend its use in production systems unless you have reviewed OpenStack Security Note `OSSN-0075`_ and understand the risk involved. .. _`OSSN-0075`: https://wiki.openstack.org/wiki/OSSN/OSSN-0075 ``db_purge_images_table`` Purge deleted rows older than a given age from the images db table. This command interprets the following options when it is invoked: --age_in_days Purge deleted rows older than this age in days (default value if not specified: 30) --max_rows Limit the number of records to delete (default value if not specified: 100) WARNING: This function is useful primarily in test systems. We do not recommend its use in production systems unless you have reviewed OpenStack Security Note `OSSN-0075`_ and understand the risk involved. .. _`OSSN-0075`: https://wiki.openstack.org/wiki/OSSN/OSSN-0075 OPTIONS ======= **General Options** .. include:: general_options.txt .. include:: footer.txt CONFIGURATION ============= The following paths are searched for a ``glance-manage.conf`` file in the following order: * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` All options set in ``glance-manage.conf`` override those set in ``glance-api.conf``.
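For example, to purge rows that were soft-deleted more than 60 days ago, deleting at most 500 records in a single run (the values shown are illustrative only)::

    glance-manage db purge --age_in_days 60 --max_rows 500

Because ``--max_rows`` caps a single run, the command can be re-run until no eligible rows remain.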
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancereplicator.rst0000664000175000017500000000374100000000000021747 0ustar00zuulzuul00000000000000================= glance-replicator ================= --------------------------------------------- Replicate images across multiple data centers --------------------------------------------- .. include:: header.txt SYNOPSIS ======== :: glance-replicator [options] [args] DESCRIPTION =========== glance-replicator is a utility that can be used to populate a new glance server using the images stored in an existing glance server. The images in the replicated glance server preserve the uuids, metadata, and image data from the original. COMMANDS ======== ``help <command>`` Output help for one of the commands below ``compare`` What is missing from the slave glance? ``dump`` Dump the contents of a glance instance to local disk. ``livecopy`` Load the contents of one glance instance into another. ``load`` Load the contents of a local directory into glance. ``size`` Determine the size of a glance instance if dumped to disk. OPTIONS ======= ``-h, --help`` Show this help message and exit ``-c CHUNKSIZE, --chunksize=CHUNKSIZE`` Amount of data to transfer per HTTP write ``-d, --debug`` Print debugging information ``-D DONTREPLICATE, --dontreplicate=DONTREPLICATE`` List of fields to not replicate ``-m, --metaonly`` Only replicate metadata, not images ``-l LOGFILE, --logfile=LOGFILE`` Path of file to log to ``-s, --syslog`` Log to syslog instead of a file ``-t TOKEN, --token=TOKEN`` Pass in your authentication token if you have one. If you use this option the same token is used for both the master and the slave. ``-M MASTERTOKEN, --mastertoken=MASTERTOKEN`` Pass in your authentication token if you have one. This is the token used for the master. ``-S SLAVETOKEN, --slavetoken=SLAVETOKEN`` Pass in your authentication token if you have one. This is the token used for the slave. ``-v, --verbose`` Print more verbose output .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancescrubber.rst0000664000175000017500000001225700000000000021414 0ustar00zuulzuul00000000000000=============== glance-scrubber =============== -------------------- Glance scrub service -------------------- .. include:: header.txt SYNOPSIS ======== :: glance-scrubber [options] DESCRIPTION =========== glance-scrubber is a utility that allows an operator to configure Glance for the asynchronous deletion of images or to revert an image's status from `pending_delete` to `active`. Whether this makes sense for your deployment depends upon the storage backend you are using and the size of typical images handled by your Glance installation. An image in glance is really a combination of an image record (stored in the database) and a file of image data (stored in a storage backend). Under normal operation, the image-delete call is synchronous, that is, Glance receives the DELETE request, deletes the image data from the storage backend, then deletes the image record from the database, and finally returns a 204 as the result of the call. If the backend is fast and deletion time is not a function of data size, these operations occur very quickly.
For backends where deletion time is a function of data size, however, the image-delete operation can take a significant amount of time to complete, to the point where a client may time out waiting for the response. This in turn leads to user dissatisfaction. To avoid this problem, Glance has a ``delayed_delete`` configuration option (False by default) that may be set in the **glance-api.conf** file. With this option enabled, when Glance receives a DELETE request, it does *only* the database part of the request, marking the image's status as ``pending_delete``, and returns immediately. (The ``pending_delete`` status is not visible to users; an image-show request for such an image will return 404.) However, it is important to note that when ``delayed_delete`` is enabled, *Glance does not delete image data from the storage backend*. That's where the glance-scrubber comes in. The glance-scrubber cleans up images that have been deleted. If you run Glance with ``delayed_delete`` enabled, you *must* run the glance-scrubber occasionally or your storage backend will eventually fill up with "deleted" image data. The glance-scrubber can also revert an image to `active` if an operator has deleted the image by mistake and pending-delete is enabled in Glance. Please make sure the ``glance-scrubber`` is not running before restoring the image, to avoid image data inconsistency. Configuration of glance-scrubber is done in the **glance-scrubber.conf** file. Options are explained in detail in comments in the sample configuration file, so we only point out a few of them here. ``scrub_time`` minimum time in seconds that an image will stay in ``pending_delete`` status (default is 0) ``scrub_pool_size`` configures a thread pool so that scrubbing can be performed in parallel (default is 1, that is, serial scrubbing) ``daemon`` a boolean indicating whether the scrubber should run as a daemon (default is False) ``wakeup_time`` time in seconds between runs when the scrubber is run in daemon mode (ignored if the scrubber is not being run in daemon mode) ``metadata_encryption_key`` If your **glance-api.conf** sets a value for this option (the default is to leave it unset), you must include the same setting in your **glance-scrubber.conf** or the scrubber won't be able to determine the locations of your image data. ``restore`` reset the specified image's status from 'pending_delete' to 'active' when the image is deleted by mistake. ``[database]`` As of the Queens release of Glance (16.0.0), the glance-scrubber does not use the deprecated Glance registry, but instead contacts the Glance database directly. Thus your **glance-scrubber.conf** file must contain a [database] section specifying the relevant information. ``[glance_store]`` This section of the file contains the configuration information for the storage backends used by your Glance installation. The usual situation is that whatever your **glance-api.conf** has for the ``[database]`` and ``[glance_store]`` configuration groups should go into your **glance-scrubber.conf**, too. Of course, if you have heavily customized your setup, you know better than we do what you are doing. The key thing is that the scrubber needs to be able to access the Glance database to determine what images need to be scrubbed (and to mark them as deleted once their associated data has been removed from the storage backend), and it needs the glance_store information so it can delete the image data.
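As a minimal sketch of how these options fit together (the values shown are illustrative, not recommendations)::

    # glance-api.conf: defer deletion of image data to the scrubber
    [DEFAULT]
    delayed_delete = True

    # glance-scrubber.conf: run as a daemon, waking every 5 minutes,
    # and keep images in pending_delete for at least one day
    [DEFAULT]
    daemon = True
    wakeup_time = 300
    scrub_time = 86400

    [database]
    # same connection settings as in glance-api.conf

    [glance_store]
    # same store settings as in glance-api.conf

OPTIONS
=======

**General options**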
.. include:: general_options.txt **-D, --daemon** Run as a long-running process. When not specified (the default), the scrubber runs the scrub operation once and then exits. When specified, the scrubber does not exit, and runs the scrub operation on the wakeup_time interval specified in the config. **--nodaemon** The inverse of --daemon. Runs the scrub operation once and then exits. This is the default. **--restore <IMAGE_ID>** Restore the specified image's status from 'pending_delete' to 'active'. FILES ===== **/etc/glance/glance-scrubber.conf** Default configuration file for the Glance Scrubber .. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/glancestatus.rst0000664000175000017500000000456000000000000021126 0ustar00zuulzuul00000000000000============= glance-status ============= --------------------- Glance Status Utility --------------------- .. include:: header.txt SYNOPSIS ======== :: glance-status [options] DESCRIPTION =========== ``glance-status`` is a command line utility to aid operators in upgrading glance by running programmable checks for things that might prevent upgrades. COMMANDS ======== ``upgrade`` This is the prefix for checking the glance deployment for any upgrade issues that might prevent glance from upgrading. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: glance-status upgrade These sections describe the available categories and arguments for :command:`glance-status`. Categories and commands ----------------------- ``glance-status upgrade check`` Performs a release-specific readiness check before restarting services with new code, or upgrading. This command expects to have complete configuration and access to the database. **Return Codes**

.. list-table::
   :widths: 20 80
   :header-rows: 1

   * - Return code
     - Description
   * - 0
     - All upgrade readiness checks passed successfully and there is nothing to do.
   * - 1
     - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK.
   * - 2
     - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade.
   * - 255
     - An unexpected error occurred.

**History of Checks** **18.0.0 (Stein)** * Placeholder to be filled in with checks as they are added in Stein. OPTIONS ======= **General Options** ``-h, --help`` show this help message and exit ``--config-dir DIR`` Path to a config directory to pull `*.conf` files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file arguments, hence over-ridden options in the directory take precedence. ``--config-file PATH`` Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to None.
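For example, a hypothetical pre-upgrade run from a shell (the output shown is illustrative; the exit status corresponds to the return codes listed above)::

    $ glance-status upgrade check
    $ echo $?
    0

An exit status of ``0`` means it is safe to proceed, ``1`` warrants investigation but may be acceptable, and ``2`` should stop the upgrade.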
.. include:: footer.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/header.txt0000664000175000017500000000030300000000000017667 0ustar00zuulzuul00000000000000 :Author: OpenStack Glance Project Team :Contact: glance@lists.launchpad.net :Date: 2019-04-10 :Copyright: OpenStack Foundation :Version: 18.0.0 :Manual section: 1 :Manual group: cloud computing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/index.rst0000664000175000017500000000016600000000000017536 0ustar00zuulzuul00000000000000======================= Glance Utility Programs ======================= .. toctree:: :glob: :maxdepth: 1 * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/cli/openstack_options.txt0000664000175000017500000000133600000000000022200 0ustar00zuulzuul00000000000000 ``--os-auth-token=OS_AUTH_TOKEN`` Defaults to env[OS_AUTH_TOKEN] ``--os-username=OS_USERNAME`` Defaults to env[OS_USERNAME] ``--os-password=OS_PASSWORD`` Defaults to env[OS_PASSWORD] ``--os-region-name=OS_REGION_NAME`` Defaults to env[OS_REGION_NAME] ``--os-tenant-id=OS_TENANT_ID`` Defaults to env[OS_TENANT_ID] ``--os-tenant-name=OS_TENANT_NAME`` Defaults to env[OS_TENANT_NAME] ``--os-auth-url=OS_AUTH_URL`` Defaults to env[OS_AUTH_URL] ``--os-user-domain-id=OS_USER_DOMAIN_ID`` Defaults to env[OS_USER_DOMAIN_ID] ``--os-project-domain-id=OS_PROJECT_DOMAIN_ID`` Defaults to env[OS_PROJECT_DOMAIN_ID] ``--os-domain-id=OS_DOMAIN_ID`` Defaults to env[OS_DOMAIN_ID] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/conf.py0000664000175000017500000001266500000000000016432 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Glance documentation build configuration file import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../..')) sys.path.insert(0, os.path.abspath('../../bin')) # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [ 'stevedore.sphinxext', 'sphinx.ext.viewcode', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'openstackdocstheme', 'sphinxcontrib.apidoc', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/glance' openstackdocs_bug_project = 'glance' openstackdocs_bug_tag = 'documentation' # sphinxcontrib.apidoc options apidoc_module_dir = '../../glance' apidoc_output_dir = 'contributor/api' apidoc_excluded_paths = [ 'hacking/*', 'hacking', 'tests/*', 'tests', 'db/sqlalchemy/*', 'db/sqlalchemy'] apidoc_separate_modules = True config_generator_config_file = [ ('../../etc/oslo-config-generator/glance-api.conf', '_static/glance-api'), ('../../etc/oslo-config-generator/glance-cache.conf', '_static/glance-cache'), ('../../etc/oslo-config-generator/glance-manage.conf', '_static/glance-manage'), ('../../etc/oslo-config-generator/glance-scrubber.conf', '_static/glance-scrubber'), ] policy_generator_config_file = [ ('../../etc/glance-policy-generator.conf', '_static/glance'), ] # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2010-present, OpenStack Foundation.' exclude_patterns = [ # The man directory includes some snippet files that are included # in other documents during the build but that should not be # included in the toctree themselves, so tell Sphinx to ignore # them when scanning for input files. 'cli/footer.txt', 'cli/general_options.txt', 'cli/openstack_options.txt', ] # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['glance.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual' man_pages = [ ('cli/glanceapi', 'glance-api', 'Glance API Server', ['OpenStack'], 1), ('cli/glancecachecleaner', 'glance-cache-cleaner', 'Glance Cache Cleaner', ['OpenStack'], 1), ('cli/glancecachemanage', 'glance-cache-manage', 'Glance Cache Manager', ['OpenStack'], 1), ('cli/glancecacheprefetcher', 'glance-cache-prefetcher', 'Glance Cache Pre-fetcher', ['OpenStack'], 1), ('cli/glancecachepruner', 'glance-cache-pruner', 'Glance Cache Pruner', ['OpenStack'], 1), ('cli/glancecontrol', 'glance-control', 'Glance Daemon Control Helper ', ['OpenStack'], 1), ('cli/glancemanage', 'glance-manage', 'Glance Management Utility', ['OpenStack'], 1), ('cli/glancereplicator', 'glance-replicator', 'Glance Replicator', ['OpenStack'], 1), ('cli/glancescrubber', 'glance-scrubber', 'Glance Scrubber Service', ['OpenStack'], 1) ] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_theme = 'openstackdocs' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Add any paths that contain "extra" files, such as .htaccess or # robots.txt. html_extra_path = ['_extra'] # If false, no module index is generated. html_use_modindex = True # If false, no index is generated. html_use_index = True # -- Options for LaTeX output ------------------------------------------------ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). latex_documents = [ ('index', 'Glance.tex', 'Glance Documentation', 'Glance Team', 'manual'), ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8182983 glance-29.0.0/doc/source/configuration/0000775000175000017500000000000000000000000017772 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/configuration/configuring.rst0000664000175000017500000020553500000000000023050 0ustar00zuulzuul00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _basic-configuration: Basic Configuration =================== Glance has a number of options that you can use to configure the Glance API server and the various storage backends that Glance can use to store images. Most configuration is done via configuration files. When starting up a Glance server, you can specify the configuration file to use (see :ref:`the documentation on controlling Glance servers `). If you do **not** specify a configuration file, Glance will look in the following directories for a configuration file, in order: * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` The Glance API server configuration file should be named ``glance-api.conf``. There are many other configuration files as well, since Glance maintains a configuration file for each of its services. If you installed Glance via your operating system's package management system, it is likely that you will have sample configuration files installed in ``/etc/glance``. In addition, sample configuration files for each server application with detailed comments are available in the :ref:`Glance Sample Configuration ` section. The PasteDeploy configuration (controlling the deployment of the WSGI application for each component) may be found by default in ``<component>-paste.ini`` alongside the main configuration file, ``<component>.conf``. For example, ``glance-api-paste.ini`` corresponds to ``glance-api.conf``. This pathname for the paste config is configurable, as follows:: [paste_deploy] config_file = /path/to/paste/config Common Configuration Options in Glance -------------------------------------- Glance has a few command-line options that are common to all Glance programs: ``--verbose`` Optional. Default: ``False`` Can be specified on the command line and in configuration files. Turns on the INFO level in logging and prints more verbose command-line interface printouts. ``--debug`` Optional.
Default: ``False`` Can be specified on the command line and in configuration files. Turns on the DEBUG level in logging. ``--config-file=PATH`` Optional. Default: See below for default search order. Specified on the command line only. Takes a path to a configuration file to use when running the program. If this CLI option is not specified, then we check to see if the first argument is a file. If it is, then we try to use that as the configuration file. If there is no file or there were no arguments, we search for a configuration file in the following order: * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` The filename that is searched for depends on the server application name. So, if you are starting up the API server, ``glance-api.conf`` is searched for. ``--config-dir=DIR`` Optional. Default: ``None`` Specified on the command line only. Takes a path to a configuration directory from which all \*.conf fragments are loaded. This provides an alternative to multiple ``--config-file`` options when it is inconvenient to explicitly enumerate all the configuration files, for example when an unknown number of config fragments are being generated by a deployment framework. If ``--config-dir`` is set, then ``--config-file`` is ignored. An example usage would be:: $ glance-api --config-dir=/etc/glance/glance-api.d $ ls /etc/glance/glance-api.d 00-core.conf 01-swift.conf 02-ssl.conf ... etc. The numeric prefixes in the example above are only necessary if a specific parse ordering is required (i.e. if an individual config option set in an earlier fragment is overridden in a later fragment). Note that ``glance-manage`` currently loads configuration from the following files: * ``glance-api.conf`` * ``glance-manage.conf`` By default ``glance-manage.conf`` only specifies a custom logging file, but other configuration options for ``glance-manage`` should be migrated there. **Warning**: Options set in ``glance-manage.conf`` will override options of the same section and name set in ``glance-api.conf`` Configuring Server Startup Options ---------------------------------- You can put the following options in the ``glance-api.conf`` file, under the ``[DEFAULT]`` section. They control startup and binding behaviour for the API server. ``bind_host=ADDRESS`` The address of the host to bind to. Optional. Default: ``0.0.0.0`` ``bind_port=PORT`` The port the server should bind to. Optional. Default: ``9292`` for the API server ``backlog=REQUESTS`` Number of backlog requests to configure the socket with. Optional. Default: ``4096`` ``tcp_keepidle=SECONDS`` Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X. Optional. Default: ``600`` ``client_socket_timeout=SECONDS`` Timeout for client connections' socket operations. If an incoming connection is idle for this period it will be closed. A value of `0` means wait forever. Optional. Default: ``900`` ``workers=PROCESSES`` Number of Glance API worker processes to start. Each worker process will listen on the same port. Increasing this value may increase performance (especially if using SSL with compression enabled). Typically it is recommended to have one worker process per CPU. The value `0` will prevent any new worker processes from being created. When ``data_api`` is set to ``glance.db.simple.api``, ``workers`` MUST be set to either ``0`` or ``1``. Optional. Default: The number of CPUs available will be used by default.
``max_request_id_length=LENGTH`` Limits the maximum size of the x-openstack-request-id header which is logged. Takes effect only if the context middleware is configured in the pipeline. Optional. Default: ``64`` (Limited by max_header_line default: 16384) Configuring Logging in Glance ----------------------------- There are a number of configuration options in Glance that control how Glance servers log messages. ``--log-config=PATH`` Optional. Default: ``None`` Specified on the command line only. Takes a path to a configuration file to use for configuring logging. Logging Options Available Only in Configuration Files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You will want to place the different logging options in the **[DEFAULT]** section in your application configuration file. As an example, you might do the following for the API server, in a configuration file called ``etc/glance-api.conf``:: [DEFAULT] log_file = /var/log/glance/api.log ``log_file`` The filepath of the file to use for logging messages from Glance's servers. If missing, the default is to output messages to ``stdout``, so if you are running Glance servers in a daemon mode (using ``glance-control``) you should make sure that the ``log_file`` option is set appropriately. ``log_dir`` The filepath of the directory to use for log files. If not specified (the default) the ``log_file`` is used as an absolute filepath. ``log_date_format`` The format string for timestamps in the log output. Defaults to ``%Y-%m-%d %H:%M:%S``. See the `logging module `_ documentation for more information on setting this format string. ``log_use_syslog`` Use syslog logging functionality. Defaults to False. Configuring Glance Storage Backends ----------------------------------- There are a number of configuration options in Glance that control how Glance stores disk images. Enabled backends must be defined in the ``[DEFAULT]`` section. ``enabled_backends=store1_id:store1_type, store2_id:store2_type[,...]`` Required. A comma-separated list of "store_id:store_type" strings. The store ids can be chosen by the user, whereas valid store types are ``filesystem``, ``http``, ``rbd``, ``swift``, ``cinder``, and ``vmware``. The default backend must then be set in the ``[glance_store]`` section: ``default_backend = store1_id`` Required. This option must be set to one of the store identifiers used in ``enabled_backends``. Additionally, one section must be created for every key:value pair defined in ``enabled_backends``. Each section must be populated with store-specific options. See :ref:`configuring-multiple-cinder-storage-backend` for a full example. Configuring the Filesystem Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``filesystem_store_datadir=PATH`` Optional. Default: ``/var/lib/glance/images/`` Can only be specified in configuration files. `This option is specific to the filesystem storage backend.` Sets the path where the filesystem storage backend writes disk images. Note that the filesystem storage backend will attempt to create this directory if it does not exist. Ensure that the user that ``glance-api`` runs under has write permissions to this directory. ``filesystem_store_file_perm=PERM_MODE`` Optional. Default: ``0`` Can only be specified in configuration files. `This option is specific to the filesystem storage backend.` The required permission value, in octal representation, for the created image file.
You can use this value to specify the user of the consuming service (such as Nova) as the only member of the group that owns the created files. To keep the default value, assign a permission value that is less than or equal to 0. Note that the file owner must maintain read permission; if this value removes that permission an error message will be logged and the BadStoreConfiguration exception will be raised. If the Glance service has insufficient privileges to change file access permissions, a file will still be saved, but a warning message will appear in the Glance log. ``filesystem_store_chunk_size=SIZE_IN_BYTES`` Optional. Default: ``65536`` Can only be specified in configuration files. `This option is specific to the filesystem storage backend.` The chunk size used when reading or writing image files. Raising this value may improve the throughput but it may also slightly increase the memory usage when handling a large number of requests. Configuring the Filesystem Storage Backend with multiple stores ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``filesystem_store_datadirs=PATH:PRIORITY`` Optional. Default: ``/var/lib/glance/images/:1`` Example::

    filesystem_store_datadirs = /var/glance/store
    filesystem_store_datadirs = /var/glance/store1:100
    filesystem_store_datadirs = /var/glance/store2:200

This option can only be specified in the configuration file and is specific to the filesystem storage backend only. The filesystem_store_datadirs option allows administrators to configure multiple store directories to save glance images in the filesystem storage backend. Each directory can be coupled with its priority. **NOTE**: * This option can be specified multiple times to specify multiple stores. * Either the filesystem_store_datadir or the filesystem_store_datadirs option must be specified in glance-api.conf * Store with priority 200 has precedence over store with priority 100. * If no priority is specified, the default priority '0' is associated with it. * If two filesystem stores have the same priority, the store with the maximum free space will be chosen to store the image. * If the same store is specified multiple times, a BadStoreConfiguration exception will be raised. Configuring the Swift Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``swift_store_auth_address=URL`` Required when using the Swift storage backend. Can only be specified in configuration files. Deprecated. Use ``auth_address`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Sets the authentication URL supplied to Swift when making calls to its storage system. For more information about the Swift authentication system, please see the `Swift auth `_ documentation. **IMPORTANT NOTE**: Swift authentication addresses use HTTPS by default. This means that if you are running Swift with authentication over HTTP, you need to set your ``swift_store_auth_address`` to the full URL, including the ``http://``. ``swift_store_user=USER`` Required when using the Swift storage backend. Can only be specified in configuration files. Deprecated. Use ``user`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Sets the user to authenticate against the ``swift_store_auth_address`` with. ``swift_store_key=KEY`` Required when using the Swift storage backend. Can only be specified in configuration files. Deprecated. Use ``key`` in the Swift back-end configuration file instead.
`This option is specific to the Swift storage backend.` Sets the authentication key to authenticate against the ``swift_store_auth_address`` with for the user ``swift_store_user``. ``swift_store_container=CONTAINER`` Optional. Default: ``glance`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Sets the name of the container to use for Glance images in Swift. ``swift_store_create_container_on_put`` Optional. Default: ``False`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` If true, Glance will attempt to create the container ``swift_store_container`` if it does not exist. ``swift_store_large_object_size=SIZE_IN_MB`` Optional. Default: ``5120`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` What size, in MB, should Glance start chunking image files and do a large object manifest in Swift? By default, this is the maximum object size in Swift, which is 5GB ``swift_store_large_object_chunk_size=SIZE_IN_MB`` Optional. Default: ``200`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` When doing a large object manifest, what size, in MB, should Glance write chunks to Swift? The default is 200MB. ``swift_store_multi_tenant=False`` Optional. Default: ``False`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` If set to True enables multi-tenant storage mode which causes Glance images to be stored in tenant specific Swift accounts. When set to False Glance stores all images in a single Swift account. ``swift_store_multiple_containers_seed`` Optional. Default: ``0`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` When set to 0, a single-tenant store will only use one container to store all images. When set to an integer value between 1 and 32, a single-tenant store will use multiple containers to store images, and this value will determine how many characters from an image UUID are checked when determining what container to place the image in. The maximum number of containers that will be created is approximately equal to 16^N. This setting is used only when swift_store_multi_tenant is disabled. Example: if this config option is set to 3 and swift_store_container = 'glance', then an image with UUID 'fdae39a1-bac5-4238-aba4-69bcc726e848' would be placed in the container 'glance_fda'. All dashes in the UUID are included when creating the container name but do not count toward the character limit, so in this example with N=10 the container name would be 'glance_fdae39a1-ba'. When choosing the value for swift_store_multiple_containers_seed, deployers should discuss a suitable value with their swift operations team. The authors of this option recommend that large scale deployments use a value of '2', which will create a maximum of ~256 containers. Choosing a higher number than this, even in extremely large scale deployments, may not have any positive impact on performance and could lead to a large number of empty, unused containers. The largest of deployments could notice an increase in performance if swift rate limits are throttling on single container. Note: If dynamic container creation is turned off, any value for this configuration option higher than '1' may be unreasonable as the deployer would have to manually create each container. 
``swift_store_admin_tenants`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: Not set. A list of swift ACL strings that will be applied as both read and write ACLs to the containers created by Glance in multi-tenant mode. This grants the specified tenants/users read and write access to all newly created image objects. The standard swift ACL string formats are allowed, including: ``<tenant_id>:<username>``, ``<tenant_name>:<username>``, and ``*:<username>``. Multiple ACLs can be combined using a comma separated list, for example: swift_store_admin_tenants = service:glance,*:admin ``swift_store_auth_version`` Can only be specified in configuration files. Deprecated. Use ``auth_version`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Optional. Default: ``2`` A string indicating which version of Swift OpenStack authentication to use. See the project `python-swiftclient `_ for more details. ``swift_store_service_type`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: ``object-store`` A string giving the service type of the swift service to use. This setting is only used if swift_store_auth_version is ``2``. ``swift_store_region`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: Not set. A string giving the region of the swift service endpoint to use. This setting is only used if swift_store_auth_version is ``2``. This setting is especially useful for disambiguation if multiple swift services might appear in a service catalog during authentication. ``swift_store_endpoint_type`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: ``publicURL`` A string giving the endpoint type of the swift service endpoint to use. This setting is only used if swift_store_auth_version is ``2``. ``swift_store_ssl_compression`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: True. If set to False, disables SSL layer compression of https swift requests. Setting to 'False' may improve performance for images which are already in a compressed format, e.g. qcow2. If set to True then compression will be enabled (provided it is supported by the swift proxy). ``swift_store_cacert`` Can only be specified in configuration files. Optional. Default: ``None`` A string giving the path to a CA certificate bundle that will allow Glance's services to perform SSL verification when communicating with Swift. ``swift_store_retry_get_count`` The number of times a Swift download will be retried before the request fails. Optional. Default: ``0`` Configuring Multiple Swift Accounts/Stores ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In order to not store Swift account credentials in the database, and to have support for multiple accounts (or multiple Swift backing stores), a reference is stored in the database and the corresponding configuration (credentials/ parameters) details are stored in the configuration file. Optional. Default: not enabled. The location for this file is specified using the ``swift_store_config_file`` configuration option in the section ``[DEFAULT]``.
Configuring Multiple Swift Accounts/Stores
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In order to not store Swift account credentials in the database, and to
support multiple accounts (or multiple Swift backing stores), a reference
is stored in the database and the corresponding configuration
(credentials/parameters) details are stored in the configuration file.

Optional. Default: not enabled.

The location of this file is specified using the
``swift_store_config_file`` configuration option in the section
``[DEFAULT]``.

**If an incorrect value is specified, the Glance API Swift store service
will not be configured.**

``swift_store_config_file=PATH``

`This option is specific to the Swift storage backend.`

``default_swift_reference=DEFAULT_REFERENCE``

Required when multiple Swift accounts/backing stores are configured.

Can only be specified in configuration files.

`This option is specific to the Swift storage backend.`

This is the default Swift reference that is used when adding any new
images.

``swift_store_auth_insecure``

If True, bypass SSL certificate verification for Swift.

Can only be specified in configuration files.

`This option is specific to the Swift storage backend.`

Optional. Default: ``False``

Configuring Swift configuration file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If ``swift_store_config_file`` is set, Glance will use information from
the file specified under this parameter.

.. note:: The ``swift_store_config_file`` is currently used only for
   single-tenant Swift store configurations. If you configure a
   multi-tenant Swift store back end (``swift_store_multi_tenant=True``),
   ensure that both ``swift_store_config_file`` and
   ``default_swift_reference`` are *not* set.

The file contains a set of references like:

.. code-block:: ini

    [ref1]
    user = tenant:user1
    key = key1
    auth_version = 2
    auth_address = http://localhost:5000/v2.0

    [ref2]
    user = project_name:user_name2
    key = key2
    user_domain_id = default
    project_domain_id = default
    auth_version = 3
    auth_address = http://localhost:5000/v3

A default reference must be configured. Its parameters will be used when
creating new images. For example, to specify ``ref2`` as the default
reference, add the following value to the ``[glance_store]`` section of
the :file:`glance-api.conf` file:

.. code-block:: ini

    default_swift_reference = ref2

In a reference, a user can specify the following parameters:

``user``

A *project_name user_name* pair in the ``project_name:user_name`` format
to authenticate against the Swift authentication service.

``key``

An authentication key for a user authenticating against the Swift
authentication service.

``auth_address``

An address where the Swift authentication service is located.

``auth_version``

A version of the authentication service to use. Valid versions are ``2``
and ``3`` for Keystone and ``1`` (deprecated) for Swauth and Rackspace.

Optional. Default: ``2``

``project_domain_id``

A domain ID of the project which is the requested project-level
authorization scope.

Optional. Default: ``None``

This option can be specified if ``auth_version`` is ``3``.

``project_domain_name``

A domain name of the project which is the requested project-level
authorization scope.

Optional. Default: ``None``

This option can be specified if ``auth_version`` is ``3``.

``user_domain_id``

A domain ID of the user which is the requested domain-level authorization
scope.

Optional. Default: ``None``

This option can be specified if ``auth_version`` is ``3``.

``user_domain_name``

A domain name of the user which is the requested domain-level
authorization scope.

Optional. Default: ``None``

This option can be specified if ``auth_version`` is ``3``.
Configuring the RBD Storage Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

**Note**: the RBD storage backend requires the Python bindings for
librados and librbd. These are in the python-ceph package on Debian-based
distributions.

``rbd_store_pool=POOL``

Optional. Default: ``rbd``

Can only be specified in configuration files.

`This option is specific to the RBD storage backend.`

Sets the RADOS pool in which images are stored.

``rbd_store_chunk_size=CHUNK_SIZE_MB``

Optional. Default: ``8``

Can only be specified in configuration files.

`This option is specific to the RBD storage backend.`

Images will be chunked into objects of this size (in megabytes). For best
performance, this should be a power of two.

``rados_connect_timeout``

Optional. Default: ``0``

Can only be specified in configuration files.

`This option is specific to the RBD storage backend.`

Prevents glance-api hangups during the connection to RBD. Sets the time
(in seconds) that glance-api waits before closing the connection. Setting
``rados_connect_timeout<=0`` means no timeout.

``rbd_store_ceph_conf=PATH``

Optional. Default: ``/etc/ceph/ceph.conf``, ``~/.ceph/config``, and
``./ceph.conf``

Can only be specified in configuration files.

`This option is specific to the RBD storage backend.`

Sets the Ceph configuration file to use.

``rbd_store_user=NAME``

Optional. Default: ``admin``

Can only be specified in configuration files.

`This option is specific to the RBD storage backend.`

Sets the RADOS user to authenticate as. This is only needed when RADOS
authentication is enabled.

A keyring must be set for this user in the Ceph configuration file, e.g.
with a user ``glance``::

    [client.glance]
    keyring=/etc/glance/rbd.keyring

To set up a user named ``glance`` with minimal permissions, using a pool
called ``images``, run::

    rados mkpool images
    ceph-authtool --create-keyring /etc/glance/rbd.keyring
    ceph-authtool --gen-key --name client.glance --cap mon 'allow r' --cap osd 'allow rwx pool=images' /etc/glance/rbd.keyring
    ceph auth add client.glance -i /etc/glance/rbd.keyring
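A sketch pulling the RBD options above into one place might look like the
following; it assumes the ``images`` pool and ``glance`` user created by
the commands above, so adjust for your own Ceph deployment:

.. code-block:: ini

    [glance_store]
    stores = rbd
    default_store = rbd
    # pool and user from the ceph-authtool/ceph auth setup above
    rbd_store_pool = images
    rbd_store_user = glance
    rbd_store_ceph_conf = /etc/ceph/ceph.conf
    # 8 MB chunks (a power of two, per the guidance above)
    rbd_store_chunk_size = 8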
Configuring the Cinder Storage Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The cinder store gives you the ability to store images in volumes (one
volume per image) in the Block Storage (Cinder) service. Glance does not
have direct access to whatever backend(s) are configured for Cinder; it
simply hands the image data over to the Block Storage service, and Cinder
decides where exactly it will be stored. Glance can influence where the
data will be stored by setting the ``cinder_volume_type`` option when
configuring your cinder store. See below for details.

**Note**: To create a Cinder volume from an image in this store quickly,
additional settings are required. Please see the Volume-backed image
documentation in the Cinder documentation for more information.

.. warning:: Because an Image-Volume created in a user account is
   susceptible to modifications by normal users that can corrupt the
   image, we recommend that service credentials *always* be set in the
   configuration file, so that the Image-Volume will be created in an
   internal project not directly accessible by non-service users. To
   create the Image-Volume in an internal project, set the following
   configuration parameters to the glance service user and the internal
   service project:

   * ``cinder_store_user_name``
   * ``cinder_store_password``
   * ``cinder_store_project_name``
   * ``cinder_store_auth_address``

``cinder_catalog_info=<service_type>:<service_name>:<endpoint_type>``

Optional. Default: ``volumev2::publicURL``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Sets the info to match when looking for cinder in the service catalog. The
format is colon-separated values of the form
``<service_type>:<service_name>:<endpoint_type>``.

``cinder_endpoint_template=http://ADDR:PORT/VERSION/%(tenant)s``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Overrides the service catalog lookup with a template for the cinder
endpoint. ``%(...)s`` parts are replaced by the value in the request
context, e.g. ``http://localhost:8776/v2/%(tenant)s``.

``os_region_name=REGION_NAME``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Region name of this node. Deprecated. Use ``cinder_os_region_name``
instead.

``cinder_os_region_name=REGION_NAME``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Region name of this node. If specified, it is used to locate cinder from
the service catalog.

``cinder_ca_certificates_file=CA_FILE_PATH``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Location of the CA certificates file to use for cinder client requests.

``cinder_http_retries=TIMES``

Optional. Default: ``3``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Number of cinderclient retries on failed HTTP calls.

``cinder_state_transition_timeout``

Optional. Default: ``300``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Time period, in seconds, to wait for a cinder volume transition to
complete.

``cinder_api_insecure=ON_OFF``

Optional. Default: ``False``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Allows insecure SSL requests to cinder.

``cinder_store_user_name=NAME``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

User name to authenticate against Cinder. If not set, the user of the
current context is used.

**NOTE**: This option is applied only if all of
``cinder_store_user_name``, ``cinder_store_password``,
``cinder_store_project_name`` and ``cinder_store_auth_address`` are set.
These options are useful for putting image volumes into an internal
service project in order to hide the volume from users, and to make the
image shareable among projects.

``cinder_store_user_domain_name=NAME``

Optional. Default: ``Default``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Domain of the user to authenticate against cinder.

**NOTE**: This option is applied only if all of
``cinder_store_user_name``, ``cinder_store_password``,
``cinder_store_project_name`` and ``cinder_store_auth_address`` are set.

``cinder_store_password=PASSWORD``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Password for the user authenticating against Cinder. If not set, the
current context auth token is used.

**NOTE**: This option is applied only if all of
``cinder_store_user_name``, ``cinder_store_password``,
``cinder_store_project_name`` and ``cinder_store_auth_address`` are set.

``cinder_store_project_name=NAME``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Project name where the image is stored in Cinder. If not set, the project
of the current context is used.

**NOTE**: This option is applied only if all of
``cinder_store_user_name``, ``cinder_store_password``,
``cinder_store_project_name`` and ``cinder_store_auth_address`` are set.

``cinder_store_project_domain_name=NAME``

Optional. Default: ``Default``

Can only be specified in configuration files.
`This option is specific to the Cinder storage backend.`

Domain of the project where the image volume is stored in cinder.

**NOTE**: This option is applied only if all of
``cinder_store_user_name``, ``cinder_store_password``,
``cinder_store_project_name`` and ``cinder_store_auth_address`` are set.

``cinder_store_auth_address=URL``

Optional. Default: ``None``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

The address where the Cinder authentication service is listening. If not
set, the cinder endpoint in the service catalog is used.

**NOTE**: This option is applied only if all of
``cinder_store_user_name``, ``cinder_store_password``,
``cinder_store_project_name`` and ``cinder_store_auth_address`` are set.

``rootwrap_config=NAME``

Optional. Default: ``/etc/glance/rootwrap.conf``

Can only be specified in configuration files.

`This option is specific to the Cinder storage backend.`

Path to the rootwrap configuration file to use for running commands as
root.

``lock_path``

Required. Defaults to the environment variable ``OSLO_LOCK_PATH``, though
we recommend setting a value in the configuration file. This specifies the
directory to use for lock files.

NOTE: This option must be set in the ``[oslo_concurrency]`` section of the
configuration file.

.. code-block:: ini

    [oslo_concurrency]
    # ...
    lock_path = /var/lib/glance/tmp

.. end

.. _configuring-multiple-cinder-storage-backend:

Configuring Multiple Cinder Storage Backends
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

From Victoria onwards, Glance fully supports configuring multiple cinder
stores by taking advantage of cinder ``volume-types``. Note that
volume-types are defined by a Cinder administrator, so setting up multiple
cinder stores will require collaboration with the Cinder admin.

On the Glance side, you add each of the cinder stores you want to define
to the ``enabled_backends`` configuration option in the glance
configuration file. For each of these stores, you must then set the
``cinder_volume_type`` configuration option in the store's specific
configuration section of glance-api.conf. What to set it to will depend on
the volume-types that are available in Cinder; consult with your Cinder
administrator to get a list of appropriate volume-types.

.. warning:: It is mandatory to set the following configuration parameters
   for multiple cinder stores to work:

   * ``cinder_store_user_name``
   * ``cinder_store_password``
   * ``cinder_store_project_name``
   * ``cinder_store_auth_address``
   * ``cinder_volume_type``

   This is because, when initializing the cinder store, we query cinder to
   validate the volume types set in the glance configuration file using
   the above credentials. If this is not validated during service start,
   image creation may fail later due to an invalid volume type being
   configured.

Below are some multiple cinder store configuration examples.
Example 1: Fresh deployment

For example, if cinder has configured two volume types, ``fast`` and
``slow``, then the glance configuration should look like::

    [DEFAULT]
    # list of enabled stores identified by their property group name
    enabled_backends = fast:cinder, slow:cinder

    # the default store, if not set glance-api service will not start
    [glance_store]
    default_backend = fast

    # conf props for fast store instance
    [fast]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-fast
    description = LVM based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

    # conf props for slow store instance
    [slow]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-slow
    description = NFS based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

Example 2: Upgrade from a single cinder store to multiple cinder stores.
If ``default_volume_type`` is set in ``cinder.conf`` and
``cinder_volume_type`` is also set in ``glance-api.conf``, then the
operator needs to create one store in glance where ``cinder_volume_type``
is the same as in the old glance configuration::

    # cinder.conf

The glance administrator has to find out what the default volume-type is
in the cinder installation, so they need to discuss with either the cinder
admin or the cloud admin to identify the default volume-type from cinder,
and then explicitly configure that as the value of ``cinder_volume_type``.

Example config before upgrade::

    # old configuration in glance
    [glance_store]
    stores = cinder, file, http
    default_store = cinder
    cinder_state_transition_timeout = 300
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    cinder_catalog_info = volumev2::publicURL
    cinder_volume_type = glance-old

Example config after upgrade::

    # new configuration in glance
    [DEFAULT]
    enabled_backends = old:cinder, new:cinder

    [glance_store]
    default_backend = new

    [new]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-new
    description = LVM based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

    [old]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-old # as per old cinder.conf
    description = NFS based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

Example 3: Upgrade from a single cinder store to multiple cinder stores.
If ``default_volume_type`` is not set in ``cinder.conf`` and
``cinder_volume_type`` is not set in ``glance-api.conf`` either, then the
administrator needs to create one store in glance to replicate the exact
old configuration::

    # cinder.conf

The glance administrator has to find out what the default volume-type is
in the cinder installation, so they need to discuss with either the cinder
admin or the cloud admin to identify the default volume-type from cinder,
and then explicitly configure that as the value of ``cinder_volume_type``.
Example config before upgrade::

    # old configuration in glance
    [glance_store]
    stores = cinder, file, http
    default_store = cinder
    cinder_state_transition_timeout = 300
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_catalog_info = volumev2::publicURL

Example config after upgrade::

    # new configuration in glance
    [DEFAULT]
    enabled_backends = old:cinder, new:cinder

    [glance_store]
    default_backend = new

    # cinder store as per old (single store configuration)
    [old]
    rootwrap_config = /etc/glance/rootwrap.conf
    description = LVM based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

    [new]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-new
    description = NFS based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

Example 4: Upgrade from a single cinder store to multiple cinder stores.
If ``default_volume_type`` is set in ``cinder.conf`` but
``cinder_volume_type`` is not set in ``glance-api.conf``, then the
administrator needs to set ``cinder_volume_type`` for one of the stores to
the same value as the ``default_volume_type`` set in ``cinder.conf``::

    # cinder.conf

The glance administrator has to find out what the default volume-type is
in the cinder installation, so they need to discuss with either the cinder
admin or the cloud admin to identify the default volume-type from cinder,
and then explicitly configure that as the value of ``cinder_volume_type``.

Example config before upgrade::

    # old configuration in glance
    [glance_store]
    stores = cinder, file, http
    default_store = cinder
    cinder_state_transition_timeout = 300
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service

Example config after upgrade::

    # new configuration in glance
    [DEFAULT]
    enabled_backends = old:cinder,new:cinder

    [glance_store]
    default_backend = old

    [old]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-old # as per old cinder.conf
    description = LVM based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

    [new]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-new
    description = NFS based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..
Example 5: Upgrade from a single cinder store to multiple cinder stores,
where properties like ``cinder_store_user_name``,
``cinder_store_password``, ``cinder_store_project_name`` and
``cinder_store_auth_address`` are not set in the single store::

    # old configuration in glance
    [glance_store]
    stores = cinder, file, http
    default_store = cinder
    cinder_state_transition_timeout = 300
    rootwrap_config = /etc/glance/rootwrap.conf

Example config after upgrade::

    # new configuration in glance
    [DEFAULT]
    enabled_backends = new:cinder

    [glance_store]
    default_backend = new

    [new]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-new
    description = NFS based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service
    # etc..

Since the cinder-specific properties were not set in the single store, the
Image-Volumes exist in user projects and need to be transferred. After
upgrading to multi store, you need to make sure all the Image-Volumes are
transferred to the ``service`` project.

Procedure:

1. Log in to the user and project owning the Image-Volume and create a
   volume transfer request.

   .. code-block:: console

      openstack volume transfer request create <volume>

   Note down the ``id`` and ``auth_key``, as they will be used when
   accepting the transfer.

2. List the transfer requests to verify the transfer was created
   successfully.

   .. code-block:: console

      openstack volume transfer request list

3. Log in to the glance user and service project and accept the transfer.

   .. code-block:: console

      openstack volume transfer request accept transferID authKey

4. List the volumes to see if the Image-Volume was transferred
   successfully.

   .. code-block:: console

      openstack volume list --name <volume-name>

Once all the Image-Volumes are migrated to the service project, you can
list or show the images, and the location will be updated from the old
format to the new format.

.. warning:: It is important to note that when upgrading from a single
   store to multiple stores, the values of the cinder-store-specific
   configuration parameters should remain the same before and after the
   upgrade.

Example: Suppose you have the following credentials set in the single
store configuration::

    [glance_store]
    stores = cinder, file, http
    default_store = cinder
    cinder_state_transition_timeout = 300
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service

Then after the upgrade, the cinder-specific parameter values for
``cinder_store_auth_address``, ``cinder_store_user_name``,
``cinder_store_password`` and ``cinder_store_project_name`` should be the
same::

    [DEFAULT]
    enabled_backends = new:cinder

    [glance_store]
    default_backend = new

    [new]
    rootwrap_config = /etc/glance/rootwrap.conf
    cinder_volume_type = glance-new
    description = NFS based cinder store
    cinder_catalog_info = volumev2::publicURL
    cinder_store_auth_address = http://localhost/identity/v3
    cinder_store_user_name = glance
    cinder_store_password = admin
    cinder_store_project_name = service

While upgrading from a single cinder store to multiple cinder stores, the
location URLs for legacy images will be changed from
``cinder://volume-id`` to ``cinder://store-name/volume-id``.
**Note**: After upgrading from a single cinder store to multiple cinder
stores, the first ``image-list``, ``image-show`` or ``GET`` call for an
image will take additional time, as a lazy-loading operation is performed
to update the legacy image location URL to the new image location URL
format. Subsequent ``GET``, ``image-list`` or ``image-show`` calls will
perform as they did before.

Configuring the VMware Storage Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

``vmware_server_host=ADDRESS``

Required when using the VMware storage backend.

Can only be specified in configuration files.

Sets the address of the ESX/ESXi or vCenter Server target system. The
address can contain an IP (``127.0.0.1``), an IP and port
(``127.0.0.1:443``), a DNS name (``www.my-domain.com``) or DNS and port.

`This option is specific to the VMware storage backend.`

``vmware_server_username=USERNAME``

Required when using the VMware storage backend.

Can only be specified in configuration files.

Username for authenticating with VMware ESX/ESXi or vCenter Server.

``vmware_server_password=PASSWORD``

Required when using the VMware storage backend.

Can only be specified in configuration files.

Password for authenticating with VMware ESX/ESXi or vCenter Server.

``vmware_datastores``

Required when using the VMware storage backend.

This option can only be specified in the configuration file and is
specific to the VMware storage backend.

``vmware_datastores`` allows administrators to configure multiple
datastores to save glance images in the VMware store backend. The required
format for the option is
``<datacenter_path>:<datastore_name>:<optional_weight>``, where
``datacenter_path`` is the inventory path to the datacenter where the
datastore is located. An optional weight can be given to specify the
priority.

Example::

    vmware_datastores = datacenter1:datastore1
    vmware_datastores = dc_folder/datacenter2:datastore2:100
    vmware_datastores = datacenter1:datastore3:200

**NOTE**:

* This option can be specified multiple times to specify multiple
  datastores.
* Either the vmware_datastore_name or the vmware_datastores option must be
  specified in glance-api.conf.
* A datastore with weight 200 has precedence over a datastore with weight
  100.
* If no weight is specified, the default weight '0' is associated with it.
* If two datastores have the same weight, the datastore with the most free
  space will be chosen to store the image.
* If the datacenter path or datastore name contains a colon (:) symbol, it
  must be escaped with a backslash.

``vmware_api_retry_count=TIMES``

Optional. Default: ``10``

Can only be specified in configuration files.

The number of times VMware ESX/VC server API calls must be retried upon
connection-related issues.

``vmware_task_poll_interval=SECONDS``

Optional. Default: ``5``

Can only be specified in configuration files.

The interval used for polling remote tasks invoked on the VMware ESX/VC
server.

``vmware_store_image_dir``

Optional. Default: ``/openstack_glance``

Can only be specified in configuration files.

The path to the folder where the images will be stored in the datastore.

``vmware_api_insecure=ON_OFF``

Optional. Default: ``False``

Can only be specified in configuration files.

Allows insecure SSL requests to the ESX/VC server.
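Putting the required options together, a VMware store section might look
like the following sketch. The hostname, credentials and datastore path
are placeholders, and the ``stores``/``default_store`` identifiers are
assumptions that may vary by release:

.. code-block:: ini

    [glance_store]
    stores = vmware
    default_store = vsphere
    vmware_server_host = vcenter.example.com
    vmware_server_username = administrator@vsphere.local
    vmware_server_password = secret
    # datastore in datacenter1 with weight 100
    vmware_datastores = datacenter1:datastore1:100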
Configuring the S3 Storage Backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

``s3_store_host``

Can only be specified in configuration files.

The host where the S3 server is listening. This option can contain a DNS
name (e.g. s3.amazonaws.com, my-object-storage.com) or an IP address
(127.0.0.1).

Example::

    s3_store_host = http://s3-ap-northeast-1.amazonaws.com
    s3_store_host = https://s3-ap-northeast-1.amazonaws.com
    s3_store_host = http://my-object-storage.com
    s3_store_host = https://my-object-storage.com:9000

``s3_store_access_key``

Can only be specified in configuration files.

Access key for authenticating with the Amazon S3 or S3-compatible storage
server.

``s3_store_secret_key``

Can only be specified in configuration files.

Secret key for authenticating with the Amazon S3 or S3-compatible storage
server.

``s3_store_bucket``

Can only be specified in configuration files.

Bucket name where the glance images will be stored in S3. If
``s3_store_create_bucket_on_put`` is set to true, the bucket will be
created automatically if it does not exist.

``s3_store_create_bucket_on_put``

Optional. Default: ``False``

Can only be specified in configuration files.

Determines whether Glance should create a new bucket in S3 if the
configured bucket does not exist.

``s3_store_bucket_url_format``

Optional. Default: ``auto``

Can only be specified in configuration files.

The access model used to specify the address of an object in an S3 bucket.
You can set the value to ``auto``, ``virtual`` or ``path``.

**NOTE**:

* In ``path``-style, the endpoint for the object looks like
  ``https://s3.amazonaws.com/bucket/example.img``.
* In ``virtual``-style, the endpoint for the object looks like
  ``https://bucket.s3.amazonaws.com/example.img``.
* If the bucket name does not follow the DNS naming convention, objects
  can be addressed in the path style, but not in the virtual style.

``s3_store_large_object_size``

Optional. Default: ``100``

Can only be specified in configuration files.

The size, in MB, at which Glance starts chunking image files and doing a
multipart upload to S3.

``s3_store_large_object_chunk_size``

Optional. Default: ``10``

Can only be specified in configuration files.

The multipart upload part size, in MB, to use when uploading parts to S3.

``s3_store_thread_pools``

Optional. Default: ``10``

Can only be specified in configuration files.

The number of thread pools used to perform a multipart upload to S3.
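For orientation, a minimal S3 store section combining the options above
might be sketched as follows; the endpoint, bucket name and credential
placeholders are assumptions to be replaced with your own values:

.. code-block:: ini

    [glance_store]
    stores = s3
    default_store = s3
    s3_store_host = https://s3-ap-northeast-1.amazonaws.com
    s3_store_access_key = <access-key>
    s3_store_secret_key = <secret-key>
    s3_store_bucket = glance
    # create the bucket on first upload if it does not exist
    s3_store_create_bucket_on_put = True
    s3_store_bucket_url_format = auto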
Configuring the Storage Endpoint
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

``swift_store_endpoint=URL``

Optional. Default: ``None``

Can only be specified in configuration files.

Overrides the storage URL returned by auth. The URL should include the
path up to, but excluding, the container. The location of an object is
obtained by appending the container and object to the configured URL, e.g.
``https://www.my-domain.com/v1/path_up_to_container``.

Configuring Glance Image Size Limit
-----------------------------------

The following configuration option is specified in the ``glance-api.conf``
configuration file in the section ``[DEFAULT]``.

``image_size_cap=SIZE``

Optional. Default: ``1099511627776`` (1 TB)

Maximum image size, in bytes, which can be uploaded through the Glance API
server.

**IMPORTANT NOTE**: this value should only be increased after careful
consideration and must be set to a value under 8 EB (9223372036854775808).

Configuring Glance User Storage Quota
-------------------------------------

The following configuration option is specified in the ``glance-api.conf``
configuration file in the section ``[DEFAULT]``.

.. note:: As of the Xena release, Glance supports :ref:`per-tenant
   <config-per-tenant-quotas>` quotas with more granularity than the
   global limit provided by this option. You may want to enable per-tenant
   quotas and leave this unset.

``user_storage_quota``

Optional. Default: 0 (Unlimited).

This value specifies the maximum amount of storage that each user can use
across all storage systems. Optionally, a unit can be specified for the
value. Values are accepted in B, KB, MB, GB or TB, which stand for Bytes,
KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. The default
unit is Bytes.

Example: ``user_storage_quota=20GB``

.. _config-per-tenant-quotas:

Configuring Glance Per-Tenant Quotas
------------------------------------

Glance can utilize per-tenant resource limits set in Keystone to enforce
quotas on users. These limits must be registered with defaults in
Keystone, with optional per-tenant overrides, prior to enabling them in
Glance. To instruct glance to use limits in Keystone, set
``[DEFAULT]/use_keystone_limits=True`` in ``glance-api.conf``.
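For illustration, registering a default limit in Keystone might look like
the following. This is a sketch: the resource name ``image_size_total``
and the service/region values are assumptions that depend on your release
and cloud; consult the Glance quota documentation for the exact set of
resource names:

.. code-block:: console

   # register a cloud-wide default limit (in MiB) for total image storage
   openstack registered limit create --service glance \
       --default-limit 1000 --region RegionOne image_size_total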
Configuring the Image Cache
---------------------------

Glance API servers can be configured to have a local image cache. Caching
of image files is transparent and happens using a piece of middleware that
can optionally be placed in the server application pipeline. This pipeline
is configured in the PasteDeploy configuration file,
``<component>-paste.ini``. You should not generally have to edit this file
directly, as it ships with ready-made pipelines for all common deployment
flavors.

Enabling the Image Cache Middleware
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To enable the image cache middleware, the cache middleware must occur in
the application pipeline **after** the appropriate context middleware.

The cache middleware should be in your ``glance-api-paste.ini`` in a
section titled ``[filter:cache]``. It should look like this::

    [filter:cache]
    paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory

A ready-made application pipeline including this filter is defined in the
``glance-api-paste.ini`` file, looking like so::

    [pipeline:glance-api-caching]
    pipeline = versionnegotiation context cache apiv1app

To enable the above application pipeline, in your main ``glance-api.conf``
configuration file, select the appropriate deployment flavor like so::

    [paste_deploy]
    flavor = caching

Enabling the Image Cache Management Middleware (DEPRECATED)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

There is an optional ``cachemanage`` middleware that allows you to
directly interact with cached images. Use this flavor in place of the
``cache`` flavor in your API configuration file. There are three types you
can choose: ``cachemanagement``, ``keystone+cachemanagement`` and
``trusted-auth+cachemanagement``::

    [paste_deploy]
    flavor = keystone+cachemanagement

New cache management endpoints were introduced in Images API v2.13. If the
cache middleware is configured, the new endpoints will be active, and
there is no need to use the cachemanagement middleware unless you still
want to use the old ``glance-cache-manage`` tooling.

Configuration Options Affecting the Image Cache
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note:: These configuration options must be set in both the glance-cache
   and glance-api configuration files.

One main configuration file option affects the image cache.

``image_cache_dir=PATH``

Required when image cache middleware is enabled.

Default: ``/var/lib/glance/image-cache``

This is the base directory the image cache can write files to. Make sure
the directory is writable by the user running the ``glance-api`` server.

``image_cache_driver=DRIVER``

Optional. Choice of ``sqlite``, ``xattr`` or ``centralized_db``

Default: ``centralized_db``

The default ``centralized_db`` cache driver has no special dependencies,
other than ``worker_self_reference_url``, which needs to be configured to
store the reference of the node in the database. The cache database used
to be independent for each glance-api service; with the ``centralized_db``
cache driver, information about the cached files is stored in one place, a
central database. Old records from the SQLite database will be
:ref:`migrated <sqlite-to-centralized_db-migration>` to the central
database on service restart during the upgrade process.

The ``sqlite`` cache driver has no special dependencies, other than the
``python-sqlite3`` library, which is installed on virtually all operating
systems with modern versions of Python. It stores information about the
cached files in a SQLite database.

**NOTE**: In the Caracal release, the ``sqlite`` cache driver has been
deprecated and will be removed in the ``F`` development cycle.

The ``xattr`` cache driver requires the ``python-xattr>=0.6.0`` library
and requires that the filesystem containing ``image_cache_dir`` have
access times tracked for all files (in other words, the noatime option
CANNOT be set for that filesystem). In addition, ``user_xattr`` must be
set on the filesystem's description line in fstab. Because of these
requirements, the ``xattr`` cache driver is not available on Windows.

``image_cache_sqlite_db=DB_FILE``

Optional. Default: ``cache.db``

When using the ``sqlite`` cache driver, you can set the name of the
database that will be used to store the cached images information. The
database is always contained in the ``image_cache_dir``.

**NOTE**: In the Caracal release, the ``image_cache_sqlite_db`` option has
been deprecated and will be removed in the ``F`` development cycle.

``image_cache_max_size=SIZE``

Optional. Default: ``10737418240`` (10 GB)

Size, in bytes, that the image cache should be constrained to. Image files
are cached automatically in the local image cache, even if the writing of
that image file would put the total cache size over this size. The
``glance-cache-pruner`` executable is what prunes the image cache to be
equal to or less than this value. The ``glance-cache-pruner`` executable
is designed to be run via cron on a regular basis. See more about this
executable in Controlling the Growth of the Image Cache.

.. _sqlite-to-centralized_db-migration:

Migrating records from SQLite to Central database
-------------------------------------------------

In the case of upgrades/updates, we need to deal with migrating existing
records from the SQLite database to the central database. This operation
is performed once, during service startup. If the SQLite database file,
configured using the ``image_cache_sqlite_db`` configuration option
(default ``cache.db``), is present at service start and
``image_cache_driver`` is set to ``centralized_db``, then the records from
the SQLite database are read and inserted into the newly created
``cached_images`` table in the central database. Once all records are
migrated, the SQLite database table is cleared and the SQLite database
file is kept as is (to be deleted by the administrator/operator later, if
required).

An important point here: once a deployer chooses to use ``centralized_db``
and the records are migrated out of the SQLite database to the central
database, they will not be migrated back if the deployer wants to revert
to the ``sqlite`` driver.
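Tying the cache options above together, a configuration using the default
driver might be sketched as follows; the host in
``worker_self_reference_url`` is an assumed example of a URL by which this
worker can be reached:

.. code-block:: ini

    [DEFAULT]
    image_cache_dir = /var/lib/glance/image-cache
    image_cache_driver = centralized_db
    # 10 GB cap, enforced by glance-cache-pruner
    image_cache_max_size = 10737418240
    # required by the centralized_db driver (assumed example host)
    worker_self_reference_url = http://glance-worker1.example.com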
Configuring Notifications
-------------------------

Glance can optionally generate notifications to be logged or sent to a
message queue. The configuration options are specified in the
``glance-api.conf`` configuration file.

``[oslo_messaging_notifications]/driver``

Optional. Default: ``noop``

Sets the notification driver used by oslo.messaging. Options include
``messaging``, ``messagingv2``, ``log`` and ``routing``.

**NOTE**: In the M release, the ``[DEFAULT]/notification_driver`` option
was deprecated in favor of ``[oslo_messaging_notifications]/driver``.

For more information, see the Glance notifications documentation and
`oslo.messaging <https://docs.openstack.org/oslo.messaging/latest/>`_.

``[DEFAULT]/disabled_notifications``

Optional. Default: ``[]``

List of disabled notifications. A notification can be given either as a
notification type, to disable a single event, or as a notification group
prefix, to disable all events within a group.

Example: if this config option is set to ["image.create",
"metadef_namespace"], then the "image.create" notification will not be
sent after an image is created, and none of the notifications for metadata
definition namespaces will be sent.
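For instance, a deployment that publishes notifications to a message queue
while muting image-delete events might use something like the following
sketch; the transport URL is an assumption about your messaging setup:

.. code-block:: ini

    [DEFAULT]
    # mute a single event type
    disabled_notifications = image.delete

    [oslo_messaging_notifications]
    driver = messagingv2
    transport_url = rabbit://openstack:secret@controller:5672/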
Configuring Glance Property Protections
---------------------------------------

Access to image meta properties may be configured using a Property
Protections Configuration file. The location of this file can be specified
in the ``glance-api.conf`` configuration file in the section
``[DEFAULT]``.

**If an incorrect value is specified, the glance API service will not
start.**

``property_protection_file=PATH``

Optional. Default: not enabled.

If ``property_protection_file`` is set, the file may use either roles or
policies to specify property protections.

``property_protection_rule_format=<roles|policies>``

Optional. Default: ``roles``.

Configuring Glance APIs
-----------------------

The glance-api service implements version 2 of the OpenStack Images API.
Currently there are no options to enable or disable specific API versions.

Configuring Glance Tasks
------------------------

Glance Tasks are implemented only for version 2 of the OpenStack Images
API.

The config value ``task_time_to_live`` is used to determine how long a
task remains visible to the user after transitioning to either the
``success`` or the ``failure`` state.

``task_time_to_live=<hours>``

Optional. Default: ``48``

The config value ``task_executor`` is used to determine which executor
should be used by the Glance service to process the task. The currently
available implementation is ``taskflow``.

``task_executor=<executor_type>``

Optional. Default: ``taskflow``

The ``taskflow`` engine has its own set of configuration options, under
the ``taskflow_executor`` section, that can be tuned to improve the task
execution process. Among the available options, you may find
``engine_mode`` and ``max_workers``. The former allows for selecting an
execution model; the available options are ``serial``, ``parallel`` and
``worker-based``. The ``max_workers`` option, instead, allows for
controlling the number of workers that will be instantiated per executor
instance. The default value for ``engine_mode`` is ``parallel``, whereas
the default number of ``max_workers`` is ``10``.

Configuring Glance performance profiling
----------------------------------------

Glance supports using osprofiler to trace the performance of key internal
handling, including RESTful API calls, DB operations, etc.

``Please be aware that Glance performance profiling is currently a work in
progress feature.`` Although some trace points are available, e.g. API
execution profiling at the wsgi main entry and SQL execution profiling in
the DB module, more fine-grained trace points are being worked on.

The config value ``enabled`` is used to determine whether to fully enable
the profiling feature for the glance-api service.

``enabled=<True|False>``

Optional. Default: ``False``

There is one more configuration option that needs to be defined to enable
Glance services profiling. The config value ``hmac_keys`` is used for
encrypting context data for performance profiling.

``hmac_keys=<secret_key>``

Optional. Default: ``SECRET_KEY``

**IMPORTANT NOTE**: in order to make profiling work as designed, the
operator needs to make the HMAC key values consistent across all services
in their deployment. Without an HMAC key, profiling will not be triggered,
even if the profiling feature is enabled.

The config value ``trace_sqlalchemy`` is used to determine whether to
fully enable SQLAlchemy-engine-based SQL execution profiling for the
glance-api service.

``trace_sqlalchemy=<True|False>``

Optional. Default: ``False``

Configuring Glance public endpoint
----------------------------------

This setting allows an operator to configure the endpoint URL that will
appear in the Glance "versions" response (that is, the response to ``GET
/``). This can be necessary when the Glance API service is run behind a
proxy, because the default endpoint displayed in the versions response is
that of the host actually running the API service. If Glance is being run
behind a load balancer, for example, direct access to individual hosts
running the Glance API may not be allowed, in which case the load-balancer
URL would be used for this value.

``public_endpoint=<None|URL>``

Optional. Default: ``None``

Configuring the http_keepalive option
-------------------------------------

``http_keepalive=<True|False>``

If False, the server will return the header "Connection: close"; if True,
the server will return "Connection: Keep-Alive" in its responses. In order
to close the client socket connection explicitly after the response is
sent and read successfully by the client, you simply have to set this
option to False when you create a wsgi server.

Configuring the Health Check
----------------------------

This setting allows an operator to configure an endpoint URL that tells a
load balancer whether the API endpoint on the node is available.

The Glance API server can be configured to expose a health check URL. To
enable the health check middleware, it must occur at the beginning of the
application pipeline.

The health check middleware should be placed in your
``glance-api-paste.ini`` in a section titled ``[app:healthcheck]``. It
should look like this::

    [app:healthcheck]
    paste.app_factory = oslo_middleware:Healthcheck.app_factory
    backends = disable_by_file
    disable_by_file_path = /etc/glance/healthcheck_disable

A ready-made composite including this application is defined e.g. in the
``glance-api-paste.ini`` file, looking like so::

    [composite:glance-api]
    paste.composite_factory = glance.api:root_app_factory
    /: apiv2app
    /healthcheck: healthcheck

For more information, see `oslo.middleware
<https://docs.openstack.org/oslo.middleware/latest/>`_.

Configuring supported disk formats
----------------------------------

Each image in Glance has an associated disk format property. When creating
an image, the user specifies a disk format, which must be selected from
the set that the Glance service supports. This supported set can be seen
by querying the ``/v2/schemas/images`` resource. An operator can add or
remove disk formats from the supported set by setting the
``disk_formats`` parameter, which is found in the ``[image_format]``
section of ``glance-api.conf``.

``disk_formats=<Comma separated list of disk formats>``

Optional.
Default: ``ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop``

glance-29.0.0/doc/source/configuration/glance_api.rst

.. _glance-api.conf:

---------------
glance-api.conf
---------------

.. show-options::
   :config-file: etc/oslo-config-generator/glance-api.conf

glance-29.0.0/doc/source/configuration/glance_cache.rst

.. _glance-cache.conf:

-----------------
glance-cache.conf
-----------------

.. show-options::
   :config-file: etc/oslo-config-generator/glance-cache.conf

glance-29.0.0/doc/source/configuration/glance_manage.rst

.. _glance-manage.conf:

------------------
glance-manage.conf
------------------

.. show-options::
   :config-file: etc/oslo-config-generator/glance-manage.conf

glance-29.0.0/doc/source/configuration/glance_policy.rst

.. _policy.yaml:

The following policies are shipped by default. Glance will assume a
policy's default value if it's not explicitly overridden in the policy
file.

-----------
policy.yaml
-----------

.. show-policy::
   :config-file: etc/glance-policy-generator.conf

glance-29.0.0/doc/source/configuration/glance_scrubber.rst

.. _glance-scrubber.conf:

--------------------
glance-scrubber.conf
--------------------

.. show-options::
   :config-file: etc/oslo-config-generator/glance-scrubber.conf

glance-29.0.0/doc/source/configuration/index.rst

.. _configuring:

============================
Glance Configuration Options
============================

This section provides a list of all possible options for each
configuration file. Refer to :ref:`basic-configuration` for a detailed
guide in getting started with various option settings. Glance uses the
following configuration files for its various services.

.. toctree::
   :glob:
   :maxdepth: 1

   *

glance-29.0.0/doc/source/configuration/sample-configuration.rst

.. _sample-configuration:

===========================
Glance Sample Configuration
===========================

The following are sample configuration files for all Glance services and
utilities. These are generated from code and reflect the current state of
code in the Glance repository.

Sample configuration for Glance API
-----------------------------------

This sample configuration can also be viewed in `glance-api.conf.sample
<../_static/glance-api.conf.sample>`_.
.. literalinclude:: ../_static/glance-api.conf.sample

Sample configuration for Glance Scrubber
----------------------------------------

This sample configuration can also be viewed in
`glance-scrubber.conf.sample <../_static/glance-scrubber.conf.sample>`_.

.. literalinclude:: ../_static/glance-scrubber.conf.sample

Sample configuration for Glance Manage
--------------------------------------

This sample configuration can also be viewed in
`glance-manage.conf.sample <../_static/glance-manage.conf.sample>`_.

.. literalinclude:: ../_static/glance-manage.conf.sample

Sample configuration for Glance Cache
-------------------------------------

This sample configuration can also be viewed in
`glance-cache.conf.sample <../_static/glance-cache.conf.sample>`_.

.. literalinclude:: ../_static/glance-cache.conf.sample

Sample Policy File for Glance
-----------------------------

The following is a Glance sample policy file for adaptation and use. The
sample policy can also be viewed in :download:`file form
<../_static/glance.policy.yaml.sample>`.

.. important:: The sample policy file is auto-generated from glance when
   this documentation is built. You must ensure your version of glance
   matches the version of this documentation.

.. literalinclude:: ../_static/glance.policy.yaml.sample

glance-29.0.0/doc/source/contributor/architecture.rst

.. Copyright 2015 OpenStack Foundation
   All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

==================
Basic architecture
==================

OpenStack Glance has a client-server architecture that provides a REST API
to the user, through which requests to the server can be performed. A
Glance Domain Controller manages the internal server operations, which are
divided into layers; specific tasks are implemented by each layer.

All the file (image data) operations are performed using the glance_store
library, which is responsible for interaction with external storage back
ends and (or) local filesystem(s). The glance_store library provides a
uniform interface to access the backend stores.

Glance uses a central database (Glance DB) that is shared amongst all the
components in the system and is SQL-based by default. Other types of
database backends are somewhat supported and used by operators, but are
not extensively tested upstream.

.. figure:: ../images/architecture.png
   :figwidth: 100%
   :align: center
   :alt: OpenStack Glance Architecture Diagram. Consists of 5 main blocks:
      "Client", "Glance", "Keystone", "Glance Store" and "Supported
      Storages". The Glance block exposes a REST API.
      The REST API makes use of the AuthZ Middleware and a Glance Domain
      Controller, which contains Auth, Notifier, Policy, Quota, Location
      and DB. The Glance Domain Controller makes use of the Glance Store
      (which is external to the Glance block), and (still within the
      Glance block) it makes use of the Database Abstraction Layer, and
      (optionally) the Registry Layer. The Registry Layer makes use of the
      Database Abstraction Layer. The Database Abstraction Layer
      exclusively makes use of the Glance Database. The Client block makes
      use of the REST API (which exists in the Glance block) and the
      Keystone block. The Glance Store block contains AuthN, which makes
      use of the Keystone block, and it also contains Glance Store
      Drivers, which exclusively make use of each of the storage systems
      in the Supported Storages block. Within the Supported Storages
      block, there exist the following storage systems, none of which make
      use of anything else: Filesystem, Swift, Ceph, "ellipses", Sheepdog.
      A complete list is given by the currently available drivers in
      glance_store/_drivers.

.. centered:: Image 1. OpenStack Glance Architecture

The following components are present in the Glance architecture:

* **A client** - any application that makes use of a Glance server.

* **REST API** - Glance functionalities are exposed via REST.

* **Database Abstraction Layer (DAL)** - an application programming
  interface (API) that unifies the communication between Glance and
  databases.

* **Glance Domain Controller** - middleware that implements the main
  Glance functionalities, such as authorization, notifications, policies,
  and database connections.

* **Glance Store** - used to organize interactions between Glance and
  various data stores.

* **Registry Layer** - optional layer that is used to organize secure
  communication between the domain and the DAL by using a separate
  service.

glance-29.0.0/doc/source/contributor/blueprints.rst

Blueprints and Specs
====================

The Glance team uses the `glance-specs
<https://opendev.org/openstack/glance-specs>`_ repository for its
specification reviews. Detailed information can be found in the
`OpenStack wiki <https://wiki.openstack.org/wiki/Blueprints>`_. Please
also find additional information in the reviews.rst file.

The Glance team enforces a deadline for spec proposals. It's a soft freeze
that happens after the first milestone is cut and before the second
milestone is released. A freeze-exception week follows the freeze week. A
new proposal can still be submitted during this period, but be aware that
it will most likely be postponed unless a particularly good argument is
made in favor of having an exception for it.

Please note that we use a template for spec submissions. It is not
required to fill out all sections in the template. Review of the spec may
require filling in information left out by the submitter.

Spec Notes
----------

There are occasions when a spec will be approved but the code will not
land in the cycle it was targeted at. For these cases, the workflow to get
the spec into the next release is as follows:

* Anyone can propose a patch to glance-specs which moves a spec from the
  previous release into the new release directory.

.. NOTE: mention the `approved`, `implemented` dirs

The specs which are moved in this way can be fast-tracked into the next
release.
Please note that it is required to re-propose the spec for the new
release, and that it will be evaluated based on the resources available
and cycle priorities.

Glance Spec Lite
----------------

In addition to the heavy-duty design documents described above, we've made
a provision for lightweight design documents for developers who have an
idea for a small, uncontroversial change. In such a case, you can propose
a *spec lite*, which is a quick description of what you want to do.

You propose a spec lite in the same way you propose a full spec: copy the
spec-lite template in the **approved** directory for the release cycle in
which you're proposing the change, fill out the appropriate sections, and
put up a patch in gerrit.

Lite spec Submission Guidelines
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Before we dive into the guidelines for writing a good lite spec, it is
worth mentioning that, depending on your level of engagement with the
Glance project and your role (user, developer, deployer, operator, etc.),
you are more than welcome to have a preliminary discussion of a potential
lite spec by reaching out to other people involved in the project. This
usually happens by posting mails on the relevant mailing lists (e.g.
openstack-discuss - include [glance] in the subject) or on the
#openstack-glance IRC channel on OFTC. If current ongoing code reviews are
related to your feature, posting comments/questions on gerrit may also be
a way to engage. Some amount of interaction with Glance developers will
give you an idea of the plausibility and form of your lite spec before you
submit it. That said, this is not mandatory.

glance-29.0.0/doc/source/contributor/contributing.rst

============================
So You Want to Contribute...
============================

For general information on contributing to OpenStack, please check out the
`contributor guide <https://docs.openstack.org/contributors/>`_ to get
started. It covers all the basics that are common to all OpenStack
projects: the accounts you need, the basics of interacting with our Gerrit
review system, how we communicate as a community, etc.

Below, we cover the more project-specific information you need to get
started with the Glance project, which is responsible for the following
OpenStack deliverables:

glance
    | The OpenStack Image service.
    | code: https://opendev.org/openstack/glance
    | docs: https://docs.openstack.org/glance/latest/
    | api-ref: https://docs.openstack.org/api-ref/image
    | Launchpad: https://launchpad.net/glance

glance_store
    | Glance's stores library.
    | code: https://opendev.org/openstack/glance_store
    | docs: https://docs.openstack.org/glance_store
    | Launchpad: https://launchpad.net/glance_store

python-glanceclient
    | Python client library for the OpenStack Image API; includes a CLI
      shell.
    | code: https://opendev.org/openstack/python-glanceclient
    | docs: https://docs.openstack.org/python-glanceclient
    | Launchpad: https://launchpad.net/python-glanceclient

See the ``CONTRIBUTING.rst`` file in each code repository for more
information about contributing to that specific deliverable. Additionally,
you should look over the docs links above; most components have helpful
developer information specific to that deliverable.

Communication
~~~~~~~~~~~~~

IRC
    People working on the Glance project may be found in the
    ``#openstack-glance`` channel on OFTC during working hours in their
    timezone.
The channel is logged, so if you ask a question when no one is around, you can check the log to see if it's been answered: http://eavesdrop.openstack.org/irclogs/%23openstack-glance/ weekly meeting Thursdays at 14:00 UTC in ``#openstack-meeting`` on OFTC. Meetings are logged: http://eavesdrop.openstack.org/meetings/glance/ More information (including a link to the Agenda, some pointers on meeting etiquette, and an ICS file to put the meeting on your calendar) can be found at: http://eavesdrop.openstack.org/#Glance_Team_Meeting mailing list We use the openstack-discuss@lists.openstack.org mailing list for asynchronous discussions or to communicate with other OpenStack teams. Use the prefix ``[glance]`` in your subject line (it's a high-volume list, so most people use email filters). More information about the mailing list, including how to subscribe and read the archives, can be found at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss physical meet-ups The Glance project usually has a presence at the OpenDev/OpenStack Project Team Gathering that takes place at the beginning of each development cycle. Planning happens on an etherpad whose URL is announced at the weekly meetings and on the mailing list. Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ The glance-core team is an active group of contributors who are responsible for directing and maintaining the Glance project. As a new contributor, your interaction with this group will be mostly through code reviews, because only members of glance-core can approve a code change to be merged into the code repository. .. note:: Although your contribution will require reviews by members of glance-core, these aren't the only people whose reviews matter. Anyone with a gerrit account can post reviews, so you can ask other developers you know to review your code ... and you can review theirs. (A good way to learn your way around the codebase is to review other people's patches.) If you're thinking, "I'm new at this, how can I possibly provide a helpful review?", take a look at `How to Review Changes the OpenStack Way `_. You can learn more about the role of core reviewers in the OpenStack governance documentation: https://docs.openstack.org/contributors/common/governance.html#core-reviewer The membership list of glance-core is maintained in gerrit: https://review.opendev.org/#/admin/groups/13,members You can also find the members of the glance-core team at the Glance weekly meetings. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ The Glance project uses both "specs" and "blueprints" to track new features. Here's a quick rundown of what they are and how the Glance project uses them. specs | Exist in the glance-specs repository. Each spec must have a Launchpad blueprint (see below) associated with it for tracking purposes. | A spec is required for any new Glance core feature, anything that changes the Image API, or anything that entails a mass change to existing drivers. | The specs repository is: https://opendev.org/openstack/glance-specs | It contains a ``README.rst`` file explaining how to file a spec. | You can read rendered specs docs at: | https://specs.openstack.org/openstack/glance-specs/ blueprints | Exist in Launchpad, where they can be targeted to release milestones. 
| You file one at https://blueprints.launchpad.net/glance You can learn more about new feature planning: https://docs.openstack.org/glance/latest/contributor/blueprints.html Feel free to ask in ``#openstack-glance`` or at the weekly meeting if you have an idea you want to develop and you're not sure whether it requires a blueprint *and* a spec or simply a blueprint. The Glance project observes the following deadlines. For the current development cycle, the dates of each (and a more detailed description) may be found on the release schedule, which you can find from: https://releases.openstack.org/ * spec freeze (all specs must be approved by this date) * new driver merge deadline * new target driver merge deadline * new feature status checkpoint * third-party CI compliance checkpoint Additionally, the Glance project observes the OpenStack-wide deadlines, for example, final release of non-client libraries (glance_store), final release for client libraries (python-glanceclient), feature freeze, etc. These are also noted and explained on the release schedule for the current development cycle. Task Tracking ~~~~~~~~~~~~~ We track our tasks in Launchpad. See the top of the page for the URL of each Glance project deliverable. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag in the Bugs section. When you start working on a bug, make sure you assign it to yourself. Otherwise someone else may also start working on it, and we don't want to duplicate efforts. Also, if you find a bug in the code and want to post a fix, make sure you file a bug (and assign it to yourself!) just in case someone else comes across the problem in the meantime. Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so in the Launchpad space for the affected deliverable: * glance: https://bugs.launchpad.net/glance * glance_store: https://bugs.launchpad.net/glance_store * python-glanceclient: https://bugs.launchpad.net/python-glanceclient Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ The Glance project policy is that a patch must have two +2s before it can be merged. (Exceptions are documentation changes, which require only a single +2, and specs, for which the PTL may require more than two +2s, depending on the complexity of the proposal.) Patches lacking unit tests are unlikely to be approved. In addition, some changes may require a release note. Any patch that changes functionality, adds functionality, or addresses a significant bug should have a release note. You can find more information about how to write a release note in the :ref:`release-notes` section of the Glance Contributors Guide. Keep in mind that the best way to make sure your patches are reviewed in a timely manner is to review other people's patches. We're engaged in a cooperative enterprise here. You can see who's been doing what with Glance recently in Stackalytics: https://www.stackalytics.com/report/activity?module=glance-group Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ All common PTL duties are enumerated in the `PTL guide `_. Additional responsibilities for the Glance PTL can be found by reading through the :ref:`managing-development` section of the Glance documentation. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/core_reviewer_guidelines.rst0000664000175000017500000001335500000000000025306 0ustar00zuulzuul00000000000000=================== Glance Code Reviews =================== Code reviews are a critical component of all OpenStack projects. Glance accepts patches from many diverse people with diverse backgrounds, employers, and experience levels. Code reviews provide a way to enforce a level of consistency across the project, and also allow for the careful on-boarding of contributions from new contributors. Glance Spec Review Practices ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In addition to code reviews, Glance also maintains a BP specification git repository. Detailed instructions for the use of this repository are provided `here `_. It is expected that Glance core team members are actively reviewing specifications which are pushed out for review to the specification repository. Glance specs are approved/merged by the PTL only. The PTL can approve a spec if it has a +2 from any two core reviewers. Some guidelines around this process are provided below: * Before approving a spec, the PTL or other core reviewers should confirm that all the comments about the specification have been addressed. * The PTL reserves the right to decide which specifications are important and need to be approved for given cycle. * All specifications should be approved within 1 week after milestone-1 release. Specifications which are not approved by then can be discussed in the following weekly meeting and a decision will be made whether to grant an FFE or move them to next cycle. * The role of a core spec reviewer is to determine design fitness of the proposed change, as well as suitability for inclusion in the project. In order to do this, sufficient detail must be provided in the proposal, and core reviewers should iterate with the author until a suitable amount of information is included. Guidelines for core reviewers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Glance follows the code review guidelines as set forth for all OpenStack projects. It is expected that all reviewers are following the guidelines set forth on that `page `_. In addition to that, the following rules are to be followed: * Use of +W * For a documentation change any core can ninja approve the patch if everything is correct * For a patch which fixes a bug, the approver should ensure that: * Unit or Functional tests have been added/updated * A release note has been added if the bug is not trivial * The commit message is tagged with Closes-Bug: #bugid * For a patch which adds/implements a new feature, the approver should ensure that: * Documentation is updated to explain how the new feature will work * If an API call is added/modified, the API reference doc should be updated accordingly * Tempest/CI coverage is proposed/available for the new feature * Client side changes are addressed if required * Use of -2 * A -2 should be used to indicate that a patch or change cannot be allowed to merge because of a fundamental conflict with the scope or goals of the project. It should not be used in cases where the patch could be altered to address feedback, or where further discussion is likely to lead to an alternative implementation that would be suitable. 
* A -2 review should always be accompanied by a comment explaining the reason that the change does not fit with the project goals, so that the submitter can understand the reasons and refocus their future contributions more productively. * The core team should come to an agreement if there is a difference of opinion about the suitability of the patch. * If a majority of the team is in favor of moving forward with the patch then the person who added these -2(s) will change it to -1 if they still don't agree with the implementation. As an open source team, we operate on consensus of multiple people and do not support individual members acting against the will of the others. * The PTL reserves the right to ask a core reviewer to change their -2 vote to a -1. * Procedural use of -2 * In some situations, a core reviewer will put a -2 vote on a patch to "hold" it temporarily from merging due to some procedural criteria. This may be used on feature changes after Feature Freeze and before branching for the next release, to ensure that no features are unintentionally merged during the freeze. * It may also be used to avoid merging something that depends on a corresponding patch in another tree, or some job configuration change that would otherwise result in a breakage if merged too soon. The person who added these -2s will then remove them again once the blocking issue has cleared. * When placing the -2, they should leave a comment explaining exactly what is happening, that the -2 is "procedural" and provide a timeline or criteria after which the vote will be dropped. Submitters can continue to revise the change during the freeze. * Use of -W * A Workflow -1 vote indicates that the change is not currently ready for a comprehensive review and is intended for the original submitter to indicate to reviewers that they do not expect the patch to be mergeable. Only core reviewers and the original change owner can vote Workflow -1 on a given patch. Any workflow votes are cleared when a new patch set is submitted for the change. This is a better way to get feedback on ongoing work than the legacy method of a Draft change (which is hidden from reviewers not specifically added to it). * Core reviewers may also use the Workflow -1 vote to prevent a change from being merged during some temporary condition, without interrupting the code-review process. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/database_architecture.rst0000664000175000017500000002437500000000000024550 0ustar00zuulzuul00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ Glance database architecture ============================ Glance Database Public API ~~~~~~~~~~~~~~~~~~~~~~~~~~ The Glance Database API contains several methods for moving image metadata to and from persistent storage. You can find a list of public methods grouped by category below. 
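Before the per-category listing, here is a minimal, hedged sketch of how this API is typically obtained and exercised. The ``RequestContext`` arguments and the *values* dictionary contents are illustrative only; ``glance.db.get_api()`` is the usual entry point, and a configured Glance deployment with a reachable database is assumed::

    # A hedged sketch (not production code) exercising a few of the methods
    # documented below. The context arguments shown are illustrative.
    from glance import context
    from glance import db

    db_api = db.get_api()
    ctx = context.RequestContext(user_id='u1', project_id='p1', is_admin=True)

    # image_create(context, values): returns a dict representation of the image.
    image = db_api.image_create(ctx, {'status': 'queued', 'owner': 'p1'})

    # image_get(context, image_id): round-trips the record just created.
    fetched = db_api.image_get(ctx, image['id'])

    # image_tag_create(context, image_id, value): adds a single tag.
    db_api.image_tag_create(ctx, image['id'], 'bootable')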
Common parameters for image methods
-----------------------------------

The following parameters can be applied to all of the image methods below:

- ``context`` — corresponds to a glance.context.RequestContext object, which stores the information on how a user accesses the system, as well as additional request information.
- ``image_id`` — a string corresponding to the image identifier.
- ``memb_id`` — a string corresponding to the member identifier of the image.

Image basic methods
-------------------

**Image processing methods:**

#. ``image_create(context, values)`` — creates a new image record with parameters listed in the *values* dictionary. Returns a dictionary representation of a newly created *glance.db.sqlalchemy.models.Image* object.

#. ``image_update(context, image_id, values, purge_props=False, from_state=None)`` — updates the existing image with the identifier *image_id* with the values listed in the *values* dictionary. Returns a dictionary representation of the updated *Image* object. Optional parameters are:

   - ``purge_props`` — a flag indicating that all the existing properties not listed in *values['properties']* should be deleted;
   - ``from_state`` — a string filter indicating that the updated image must be in the specified state.

#. ``image_destroy(context, image_id)`` — deletes all database records of an image with the identifier *image_id* (like tags, properties, and members) and sets a 'deleted' status on all the image locations.

#. ``image_get(context, image_id, force_show_deleted=False)`` — gets an image with the identifier *image_id* and returns its dictionary representation. The parameter *force_show_deleted* is a flag indicating that the image info should be returned even if the image status is 'deleted' or 'pending_delete'.

#. ``image_get_all(context, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False)`` — gets all the images that match zero or more filters. Optional parameters are:

   - ``filters`` — dictionary of filter keys and values. If a 'properties' key is present, it is treated as a dictionary of key/value filters on the image properties attribute.
   - ``marker`` — image id after which a page should start.
   - ``limit`` — maximum number of images to return.
   - ``sort_key`` — list of image attributes by which results should be sorted.
   - ``sort_dir`` — direction in which results should be sorted (asc, desc).
   - ``member_status`` — only returns shared images that have this membership status.
   - ``is_public`` — if true, returns only public images. If false, returns only private and shared images.
   - ``admin_as_user`` — for backwards compatibility. If true, an admin sees the same set of images that would be seen by a regular user.
   - ``return_tag`` — indicates whether an image entry in the result includes its relevant tag entries. This can improve upper-layer query performance and avoid using separate calls.

Image location methods
----------------------

**Image location processing methods:**

#. ``image_location_add(context, image_id, location)`` — adds a new location to an image with the identifier *image_id*. This location contains values listed in the dictionary *location*.

#. ``image_location_update(context, image_id, location)`` — updates an existing location with the identifier *location['id']* for an image with the identifier *image_id* with values listed in the dictionary *location*.

#.
``image_location_delete(context, image_id, location_id, status, delete_time=None)`` — sets a 'deleted' or 'pending_delete' *status* to an existing location record with the identifier *location_id* for an image with the identifier *image_id*. Image property methods ---------------------- .. warning:: There is no public property update method. So if you want to modify it, you have to delete it first and then create a new one. **Image property processing methods:** #. ``image_property_create(context, values)`` — creates a property record with parameters listed in the *values* dictionary for an image with *values['id']*. Returns a dictionary representation of a newly created *ImageProperty* object. #. ``image_property_delete(context, prop_ref, image_ref)`` — deletes an existing property record with a name *prop_ref* for an image with the identifier *image_ref*. Image member methods -------------------- **Methods to handle image memberships:** #. ``image_member_create(context, values)`` — creates a member record with properties listed in the *values* dictionary for an image with *values['id']*. Returns a dictionary representation of a newly created *ImageMember* object. #. ``image_member_update(context, memb_id, values)`` — updates an existing member record with properties listed in the *values* dictionary for an image with *values['id']*. Returns a dictionary representation of an updated member record. #. ``image_member_delete(context, memb_id)`` — deletes an existing member record with *memb_id*. #. ``image_member_find(context, image_id=None, member=None, status=None)`` — returns all members for a given context with optional image identifier (*image_id*), member name (*member*), and member status (*status*) parameters. #. ``image_member_count(context, image_id)`` — returns a number of image members for an image with *image_id*. Image tag methods ----------------- **Methods to process images tags:** #. ``image_tag_set_all(context, image_id, tags)`` — changes all the existing tags for an image with *image_id* to the tags listed in the *tags* param. To remove all tags, a user just should provide an empty list. #. ``image_tag_create(context, image_id, value)`` — adds a *value* to tags for an image with *image_id*. Returns the value of a newly created tag. #. ``image_tag_delete(context, image_id, value)`` — removes a *value* from tags for an image with *image_id*. #. ``image_tag_get_all(context, image_id)`` — returns a list of tags for a specific image. Image info methods ------------------ The next two methods inform a user about his or her ability to modify and view an image. The *image* parameter here is a dictionary representation of an *Image* object. #. ``is_image_mutable(context, image)`` — informs a user about the possibility to modify an image with the given context. Returns True if the image is mutable in this context. #. ``is_image_visible(context, image, status=None)`` — informs about the possibility to see the image details with the given context and optionally with a status. Returns True if the image is visible in this context. **Glance database schema** .. figure:: ../images/glance_db.png :figwidth: 100% :align: center :alt: The glance database schema is depicted by 5 tables. 
The table named Images has the following columns: id: varchar(36); name: varchar(255), nullable; size: bigint(20), nullable; status: varchar(30); is_public: tinyint(1); created_at: datetime; updated_at: datetime, nullable; deleted_at: datetime, nullable; deleted: tinyint(1); disk_format: varchar(20), nullable; container_format: varchar(20), nullable; checksum: varchar(32), nullable; owner: varchar(255), nullable; min_disk: int(11); min_ram: int(11); protected: tinyint(1); and virtual_size: bigint(20), nullable;.

The table named image_locations has the following columns: id: int(11), primary; image_id: varchar(36), refers to column named id in table Images; value: text; created_at: datetime; updated_at: datetime, nullable; deleted_at: datetime, nullable; deleted: tinyint(1); meta_data: text, nullable; and status: varchar(30);.

The table named image_members has the following columns: id: int(11), primary; image_id: varchar(36), refers to column named id in table Images; member: varchar(255); can_share: tinyint(1); created_at: datetime; updated_at: datetime, nullable; deleted_at: datetime, nullable; deleted: tinyint(1); and status: varchar(20);.

The table named image_tags has the following columns: id: int(11), primary; image_id: varchar(36), refers to column named id in table Images; value: varchar(255); created_at: datetime; updated_at: datetime, nullable; deleted_at: datetime, nullable; and deleted: tinyint(1);.

The table named image_properties has the following columns: id: int(11), primary; image_id: varchar(36), refers to column named id in table Images; name: varchar(255); value: text, nullable; created_at: datetime; updated_at: datetime, nullable; deleted_at: datetime, nullable; and deleted: tinyint(1);.

.. centered:: Image 1. Glance images DB schema

Glance Database Backends
~~~~~~~~~~~~~~~~~~~~~~~~

Metadata Backends
-----------------

.. list-plugins:: glance.database.metadata_backend
   :detailed:

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/database_migrations.rst0000664000175000017500000003165500000000000024231 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

======================================================
Writing Database Migrations for Zero-Downtime Upgrades
======================================================

Beginning in Ocata, OpenStack Glance uses Alembic, which replaced SQLAlchemy Migrate as the database migration engine. Moving to Alembic is particularly motivated by the zero-downtime upgrade work. Refer to [GSPEC1]_ and [GSPEC2]_ for more information on zero-downtime upgrades in Glance and why a move to Alembic was deemed necessary.

Stop right now and go read [GSPEC1]_ and [GSPEC2]_ if you haven't done so already. Those documents explain the strategy Glance has approved for database migrations, and we expect you to be familiar with them in what follows.
This document focuses on the "how", but unless you understand the "what" and "why", you'll be wasting your time reading this document.

Prior to Ocata, database migrations were conceived as monoliths. Thus, they did not need to carefully distinguish and manage database schema expansions, data migrations, or database schema contractions. The modern database migrations are more sensitive to the characteristics of the changes being attempted, and thus we clearly identify three phases of a database migration: (1) expand, (2) migrate, and (3) contract. A developer modifying the Glance database must supply a script for each of these phases. Here's a quick reminder of what each phase entails. For more information, see [GSPEC1]_.

Expand
  Expand migrations MUST be additive in nature. Expand migrations should be seen as the minimal set of schema changes required by the new services that can be applied while the old services are still running. Expand migrations should optionally include temporary database triggers that keep the old and new columns in sync. If a database change needs data to be migrated between columns, then temporary database triggers are required to keep the columns in sync while the data migrations are in-flight.

  .. note:: Sometimes there could be an exception to the additive-only change strategy for the expand phase. It is described more elaborately in [GSPEC1]_. Again, consider this as a last reminder to read [GSPEC1]_, if you haven't already done so.

Migrate
  Data migrations MUST NOT attempt any schema changes and only move existing data between old and new columns such that new services can start consuming the new tables and/or columns introduced by the expand migrations.

Contract
  Contract migrations usually include the remaining schema changes required by the new services that couldn't be applied during the expand phase due to their incompatible nature with the old services. Any temporary database triggers added during the expand migrations MUST be dropped with contract migrations.

Alembic Migrations
==================

As mentioned earlier, starting in Ocata Glance database migrations must be written for Alembic. All existing Glance migrations have been ported to Alembic. They can be found here [GMIGS1]_.

Schema Migrations (Expand/Contract)
-----------------------------------

* All Glance schema migrations must reside in the ``glance.db.sqlalchemy.alembic_migrations.versions`` package.

* Every Glance schema migration must be a python module with the following structure (the angle-bracketed names are placeholders; a complete worked example follows this list):

  .. code::

      """

      Revision ID: <revision_id>
      Revises: <previous_revision_id>
      """

      revision = '<revision_id>'
      down_revision = '<previous_revision_id>'
      depends_on = '<revision_id_of_dependency>'


      def upgrade():
          <schema change operations>

  Identifiers ``revision``, ``down_revision`` and ``depends_on`` are elaborated below.

* The ``revision`` identifier is a unique revision id for every migration. It must conform to one of the following naming schemes. All monolith migrations must conform to:

  .. code::

      <release_name><two_digit_sequence_number>

  And, all expand/contract migrations must conform to:

  .. code::

      <release_name>_[expand|contract]<two_digit_sequence_number>

  Example:

  .. code::

      Monolith migration: ocata01
      Expand migration: ocata_expand01
      Contract migration: ocata_contract01

  This naming convention is devised with the intention of making the migration sequence easy to understand. While the ``<release_name>`` mentions the release a migration belongs to, the ``<two_digit_sequence_number>`` helps identify the order of migrations within each release. For modern migrations, the ``[expand|contract]`` part of the revision id helps identify the revision branch a migration belongs to.

* The ``down_revision`` identifier MUST be specified for all Alembic migration scripts. It points to the previous migration (or ``revision`` in Alembic lingo) on which the current migration is based. This essentially establishes a migration sequence, very much like a singly linked list would (except that we use a ``previous`` link here instead of the more traditional ``next`` link). The very first migration, ``liberty`` in our case, would have ``down_revision`` set to ``None``. All other migrations must point to the last migration in the sequence at the time of writing the migration. For example, Glance has two migrations in Mitaka, namely, ``mitaka01`` and ``mitaka02``. The migration sequence for Mitaka should look like:

  .. code::

      liberty
         ^
         |
      mitaka01
         ^
         |
      mitaka02

* The ``depends_on`` identifier helps establish dependencies between two migrations. If a migration ``X`` depends on running migration ``Y`` first, then ``X`` is said to depend on ``Y``. This could be specified in the migration as shown below:

  .. code::

      revision = 'X'
      down_revision = 'W'
      depends_on = 'Y'

  Naturally, every migration depends on the migrations preceding it in the migration sequence. Hence, in a typical branch-less migration sequence, ``depends_on`` is of limited use. However, this could be useful for migration sequences with branches. We'll see more about this in the next section.

* All schema migration scripts must adhere to the naming convention mentioned below:

  .. code::

      <revision_id>_<brief_description>.py

  Example:

  .. code::

      Monolith migration: ocata01_add_visibility_remove_is_public.py
      Expand migration: ocata_expand01_add_visibility.py
      Contract migration: ocata_contract01_remove_is_public.py
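To make the conventions above concrete, here is a hedged sketch of a complete expand-phase migration module. The revision identifiers (``zed_expand01``, ``yoga_contract01``), table, and column are hypothetical and do not correspond to actual Glance migrations; only the overall shape follows the rules described above::

    """Add a nullable 'codename' column to images (expand phase)

    Revision ID: zed_expand01
    Revises: yoga_contract01
    """

    from alembic import op
    import sqlalchemy as sa

    # Hypothetical revision identifiers, following the naming scheme above.
    revision = 'zed_expand01'
    down_revision = 'yoga_contract01'
    depends_on = None


    def upgrade():
        # Purely additive change: a nullable column can be added while the
        # old services are still running, so it is safe in the expand phase.
        op.add_column('images',
                      sa.Column('codename', sa.String(255), nullable=True))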
Dependency Between Contract and Expand Migrations
-------------------------------------------------

* To achieve zero-downtime upgrades, the Glance migration sequence has been branched into ``expand`` and ``contract`` branches. As the name suggests, the ``expand`` branch contains only the expand migrations and the ``contract`` branch contains only the contract migrations. As per the zero-downtime migration strategy, the expand migrations are run first, followed by the contract migrations. To establish this dependency, we make the contract migrations explicitly depend on their corresponding expand migrations. Thus, running contract migrations without running expansions is not possible.

  For example, the Community Images migration in Ocata includes the experimental E-M-C migrations. The expand migration is ``ocata_expand01`` and the contract migration is ``ocata_contract01``. The dependency is established as below.

  .. code::

      revision = 'ocata_contract01'
      down_revision = 'mitaka02'
      depends_on = 'ocata_expand01'

  Every contract migration in Glance MUST depend on its corresponding expand migration. Thus, the current Glance migration sequence looks as shown below:

  .. code::

                 liberty
                    ^
                    |
                 mitaka01
                    ^
                    |
                 mitaka02
                    ^
                    |
       +------------+------------+
       |                         |
      ocata_expand01 <------ ocata_contract01
       ^                         ^
       |                         |
      pike_expand01 <------ pike_contract01

Data Migrations
---------------

* All Glance data migrations must reside in the ``glance.db.sqlalchemy.alembic_migrations.data_migrations`` package.

* The data migrations themselves are not Alembic migration scripts. And, hence they don't require a unique revision id. However, they must adhere to a naming convention similar to the one discussed above. That is:

  .. code::

      <release_name>_migrate<two_digit_sequence_number>_<brief_description>.py

  Example:

  .. code::

      Data Migration: ocata_migrate01_community_images.py

* All data migrations modules must adhere to the following structure (a concrete sketch follows this list):

  .. code::

      def has_migrations(engine):
          <check whether any rows need migrating>
          return <boolean>


      def migrate(engine):
          <move data between old and new columns/tables>
          return <number of rows migrated>
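For concreteness, here is a hedged sketch of such a data-migration module. The table, column, and values are hypothetical and purely illustrative; only the ``has_migrations()``/``migrate()`` contract matches the structure above::

    # Hypothetical data migration: backfill a NULL 'visibility' column.
    import sqlalchemy as sa


    def has_migrations(engine):
        """Return True if at least one row still needs to be migrated."""
        with engine.connect() as conn:
            row = conn.execute(sa.text(
                "SELECT id FROM images WHERE visibility IS NULL LIMIT 1"
            )).first()
        return row is not None


    def migrate(engine):
        """Migrate the rows and return the number of rows migrated."""
        with engine.begin() as conn:  # transaction commits on successful exit
            result = conn.execute(sa.text(
                "UPDATE images SET visibility = 'shared' "
                "WHERE visibility IS NULL"))
            migrated = result.rowcount
        return migrated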
NOTES
-----

* In Ocata and Pike, Glance required every database migration to include both monolithic and Expand-Migrate-Contract (E-M-C) style migrations. In Queens, E-M-C migrations became the default and a monolithic migration script is no longer required. In Queens, the glance-manage tool was refactored so that the ``glance-manage db sync`` command runs the expand, migrate, and contract scripts "under the hood". From the viewpoint of the operator, there is no difference between having a single monolithic script and having three scripts. Since we are using the same scripts for offline and online (zero-downtime) database upgrades, as a developer you have to pay attention in your scripts to determine whether you need to add/remove triggers in the expand/contract scripts. See the changes to the ocata scripts in https://review.opendev.org/#/c/544792/ for an example of how to do this.

* Alembic is a database migration engine written for SQLAlchemy. So, any migration script written for SQLAlchemy Migrate should work with Alembic as well, provided the structural differences above (primarily adding ``revision``, ``down_revision`` and ``depends_on``) are taken care of. Moreover, it may be easier to do certain operations with Alembic. Refer to [ALMBC]_ for information on Alembic operations.

* A given database change may not require actions in each of the expand, migrate, contract phases, but nonetheless, we require a script for *each* phase for *every* change. In the case where an action is not required, a ``no-op`` script, described below, MUST be used. For instance, if a database migration is completely contractive in nature, say removing a column, there won't be a need for expand and migrate operations. But including no-op expand and migrate scripts will make this explicit and also preserve the one-to-one correspondence between expand, migrate and contract scripts.

  A no-op expand/contract Alembic migration:

  .. code::

      """An example empty Alembic migration script

      Revision ID: foo02
      Revises: foo01
      """

      revision = 'foo02'
      down_revision = 'foo01'


      def upgrade():
          pass

  A no-op migrate script:

  .. code::

      """An example empty data migration script"""


      def has_migrations(engine):
          return False


      def migrate(engine):
          return 0

References
==========

.. [GSPEC1] `Database Strategy for Rolling Upgrades `_
.. [GSPEC2] `Glance Alembic Migrations Spec `_
.. [GMIGS1] `Glance Alembic Migrations Implementation `_
.. [ALMBC] `Alembic Operations `_

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/documentation.rst0000664000175000017500000000730400000000000023104 0ustar00zuulzuul00000000000000Documentation
=============

Tips for Doc Writers (and Developers, too!)
-------------------------------------------

Here are some useful tips about questions that come up a lot but aren't always easy to find answers to.

* Make example URLs consistent

  For consistency, example URLs for openstack components are in the form:

  .. code::

      project.openstack.example.org

  So, for example, an example image-list call to Glance would use a URL written like this: ..
code:: http://glance.openstack.example.org/v2/images * URLs for OpenStack project documentation Each project's documentation is published to the following URLs: - ``https://docs.openstack.org/$project-name/latest`` - built from master - ``https://docs.openstack.org/$project-name/$series`` - built from stable For example, the Glance documentation is published to: - ``https://docs.openstack.org/glance/latest`` - built from master - ``https://docs.openstack.org/glance/ocata`` - built from stable/ocata * URLs for OpenStack API Reference Guides Each project's API Reference Guide is published to: - ``https://docs.openstack.org/api-ref/$service-type`` For example, the Glance Image Service API Reference guide is published to: - ``https://docs.openstack.org/api-ref/image`` Where to Contribute ------------------- There are a few different kinds of documentation associated with Glance to which you may want to contribute: * Configuration As you read through the sample configuration files in the ``etc`` directory in the source tree, you may find typographical errors, or grammatical problems, or text that could use clarification. The Glance team welcomes your contributions, but please note that the sample configuration files are generated, not static text. Thus you must modify the source code where the particular option you're correcting is defined and then re-generate the conf file using ``tox -e genconfig``. * Glance's Documentation The Glance Documentation (what you're reading right now) lives in the source code tree under ``doc/source``. It consists of information for developers working on Glance, information for consumers of the OpenStack Images APIs implemented by Glance, and information for operators deploying Glance. Thus there's a wide range of documents to which you could contribute. Small improvements can simply be addressed by a patch, but it's probably a good idea to first file a bug for larger changes so they can be tracked more easily (especially if you plan to submit several different patches to address the shortcoming). * User Guides There are several user guides published by the OpenStack Documentation Team. Please see the README in their code repository for more information: https://github.com/openstack/openstack-manuals * OpenStack API Reference There's a "quick reference" guide to the APIs implemented by Glance: https://docs.openstack.org/api-ref/image/ The guide is generated from source files in the source code tree under ``api-ref/source``. Corrections in spelling or typographical errors may be addressed directly by a patch. If you note a divergence between the API reference and the actual behavior of Glance, please file a bug before submitting a patch. Additionally, now that the quick reference guides are being maintained by each project (rather than a central team), you may note divergences in format between the Glance guides and those of other teams. For example, some projects may have adopted an informative new way to display error codes. If you notice structural improvements that our API reference is missing, please file a bug. And, of course, we would also welcome your patch implementing the improvement! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/domain_implementation.rst0000664000175000017500000001154500000000000024611 0ustar00zuulzuul00000000000000.. Copyright 2016 OpenStack Foundation All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

==================================
Glance domain model implementation
==================================

Gateway and basic layers
~~~~~~~~~~~~~~~~~~~~~~~~

The domain model contains the following layers:

#. :ref:`authorization`
#. :ref:`property`
#. :ref:`notifier`
#. :ref:`policy`
#. :ref:`quota`
#. :ref:`location`
#. :ref:`database`

The schema below shows a stack that contains the Image domain layers and their locations:

.. figure:: ../images/glance_layers.png
   :figwidth: 100%
   :align: center
   :alt: From top to bottom, the stack consists of the Router and REST API, which are above the domain implementation. The Auth, Property Protection (optional), Notifier, Policy, Quota, Location, and Database represent the domain implementation. The Data Access layer sits below the domain implementation. Further, the Client block calls the Router; the Location block calls the Glance Store, and the Data Access layer calls the DBMS. Additional information conveyed in the image is the location in the Glance code of the various components: Router: api/v2/router.py; REST API: api/v2/*; Auth: api/authorization.py; Property Protection: api/property_protections.py; Notifier: notifier.py; Policy: api/policy.py; Quota: quota/__init__.py; Location: location.py; DB: db/__init__.py; Data Access: db/sqlalchemy/api.py

.. _authorization:

Authorization
-------------

The first layer of the domain model verifies whether an image itself or its property can be changed. An admin or image owner can apply the changes. The information about a user is taken from the request ``context`` and is compared with the image ``owner``. If the user cannot apply a change, a corresponding error message appears.

.. _property:

Property protection
-------------------

The second layer of the domain model is optional. It becomes available if you set the ``property_protection_file`` parameter in the Glance configuration file.

There are two types of image properties in Glance:

* *Core properties*, as specified in the image schema
* *Meta properties*, which are the arbitrary key/value pairs that can be added to an image

The property protection layer manages access to the meta properties through Glance's public API calls. You can restrict the access in the property protection configuration file.

.. _notifier:

Notifier
--------

On the third layer of the domain model, the following items are added to the message queue:

#. Notifications about all of the image changes
#. All of the exceptions and warnings that occurred while using an image

.. _policy:

Policy
------

The fourth layer of the domain model is responsible for:

#. Defining access rules to perform actions with an image. The rules are defined in the :file:`etc/policy.yaml` file.
#. Monitoring of the rules implementation.

..
_quota: Quota ----- On the fifth layer of the domain model, if a user has an admin-defined size quota for all of his uploaded images, there is a check that verifies whether this quota exceeds the limit during an image upload and save: * If the quota does not exceed the limit, then the action to add an image succeeds. * If the quota exceeds the limit, then the action does not succeed and a corresponding error message appears. .. _location: Location -------- The sixth layer of the domain model is used for interaction with the store via the ``glance_store`` library, like upload and download, and for managing an image location. On this layer, an image is validated before the upload. If the validation succeeds, an image is written to the ``glance_store`` library. This sixth layer of the domain model is responsible for: #. Checking whether a location URI is correct when a new location is added #. Removing image data from the store when an image location is changed #. Preventing image location duplicates .. _database: Database -------- On the seventh layer of the domain model: * The methods to interact with the database API are implemented. * Images are converted to the corresponding format to be recorded in the database. And the information received from the database is converted to an Image object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/domain_model.rst0000664000175000017500000002357600000000000022673 0ustar00zuulzuul00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============ Domain model ============ The main goal of a domain model is refactoring the logic around object manipulation by splitting it to independent layers. Each subsequent layer wraps the previous one creating an "onion" structure, thus realizing a design pattern called "Decorator." The main feature of domain model is to use a composition instead of inheritance or basic decoration while building an architecture. This provides flexibility and transparency of an internal organization for a developer, because they do not know what layers are used and works with a domain model object as with a common object. Inner architecture ~~~~~~~~~~~~~~~~~~ Each layer defines its own operations' implementation through a special ``proxy`` class. At first, operations are performed on the upper layer, then they successively pass the control to the underlying layers. The nesting of layers can be specified explicitly using a programmer interface Gateway or implicitly using ``helper`` classes. Nesting may also depend on various conditions, skipping or adding additional layers during domain object creation. Proxies ~~~~~~~ The layer behavior is described in special ``proxy`` classes that must provide exactly the same interface as the original class does. In addition, each ``proxy`` class has a field ``base`` indicating a lower layer object that is an instance of another ``proxy`` or ``original`` class. 
To access the rest of the fields, you can use special ``proxy`` properties or the universal methods ``set_property`` and ``get_property``. In addition, the ``proxy`` class must have an ``__init__`` method of the following form::

    def __init__(self, base, helper_class=None, helper_kwargs=None, **kwargs)

where ``base`` corresponds to the underlying object layer, and the optional ``helper_class`` and ``helper_kwargs`` are used to create a ``helper`` class.

Thus, to access a ``meth1`` method from the underlying layer, it is enough to call it on the ``base`` object::

    def meth1(*args, **kwargs):
        …
        self.base.meth1(*args, **kwargs)
        …

To get access to the domain object field, it is recommended to use properties that are created by an auxiliary function::

    def _create_property_proxy(attr):
        def get_attr(self):
            return getattr(self.base, attr)

        def set_attr(self, value):
            return setattr(self.base, attr, value)

        def del_attr(self):
            return delattr(self.base, attr)

        return property(get_attr, set_attr, del_attr)

So, the reference to the underlying layer field ``prop1`` looks like::

    class Proxy(object):
        …
        prop1 = _create_property_proxy('prop1')
        …

If the number of layers is big, it is reasonable to create a common parent ``proxy`` class that provides further control transfer. This facilitates the writing of specific layers if they do not provide a particular implementation of some operation.

Gateway
~~~~~~~

``gateway`` is a mechanism to explicitly specify a composition of the domain model layers. It defines an interface to retrieve the domain model object based on the ``proxy`` classes described above.

Example of the gateway implementation
-------------------------------------

This example defines three classes:

* ``Base`` is the main class that sets an interface for all the ``proxy`` classes.
* ``LoggerProxy`` class implements additional logic associated with the logging of messages from the ``print_msg`` method.
* ``ValidatorProxy`` class implements an optional check that helps to determine whether all the parameters in the ``sum_numbers`` method are positive.

::

    class Base(object):
        """Base class in domain model."""
        msg = "Hello Domain"

        def print_msg(self):
            print(self.msg)

        def sum_numbers(self, *args):
            return sum(args)

    class LoggerProxy(object):
        """Class extends functionality by writing message to log."""
        def __init__(self, base, logg):
            self.base = base
            self.logg = logg

        # Proxy to provide implicit access to inner layer.
        msg = _create_property_proxy('msg')

        def print_msg(self):
            # Write message to log and then pass the control to inner layer.
            self.logg.write("Message %s has been written to the log" % self.msg)
            self.base.print_msg()

        def sum_numbers(self, *args):
            # Nothing to do here. Just pass the control to the next layer.
            return self.base.sum_numbers(*args)

    class ValidatorProxy(object):
        """Class validates that input parameters are correct."""
        def __init__(self, base):
            self.base = base

        msg = _create_property_proxy('msg')

        def print_msg(self):
            # There are no checks.
            self.base.print_msg()

        def sum_numbers(self, *args):
            # Validate input numbers and pass them further.
            for arg in args:
                if arg <= 0:
                    return "Only positive numbers are supported."
            return self.base.sum_numbers(*args)

Thus, the ``gateway`` method for the above example may look like:

::

    def gateway(logg, only_positive=True):
        base = Base()
        logger = LoggerProxy(base, logg)
        if only_positive:
            return ValidatorProxy(logger)
        return logger

    domain_object = gateway(sys.stdout, only_positive=True)

It is important to consider that the order of the layers matters.
And even if layers are logically independent from each other, rearranging them in different order may lead to another result. Helpers ~~~~~~~ ``Helper`` objects are used for an implicit nesting assignment that is based on a specification described in an auxiliary method (similar to ``gateway``). This approach may be helpful when using a *simple factory* for generating objects. Such a way is more flexible as it allows specifying the wrappers dynamically. The ``helper`` class is unique for all the ``proxy`` classes and it has the following form: :: class Helper(object): def __init__(self, proxy_class=None, proxy_kwargs=None): self.proxy_class = proxy_class self.proxy_kwargs = proxy_kwargs or {} def proxy(self, obj): """Wrap an object.""" if obj is None or self.proxy_class is None: return obj return self.proxy_class(obj, **self.proxy_kwargs) def unproxy(self, obj): """Return object from inner layer.""" if obj is None or self.proxy_class is None: return obj return obj.base Example of a simple factory implementation ------------------------------------------ Here is a code of a *simple factory* for generating objects from the previous example. It specifies a ``BaseFactory`` class with a ``generate`` method and related ``proxy`` classes: :: class BaseFactory(object): """Simple factory to generate an object.""" def generate(self): return Base() class LoggerFactory(object): """Proxy class to add logging functionality.""" def __init__(self, base, logg, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base self.logg = logg def generate(self): return self.helper.proxy(self.base.generate()) class ValidatorFactory(object): """Proxy class to add validation.""" def __init__(self, base, only_positive=True, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base self.only_positive = only_positive def generate(self): if self.only_positive: # Wrap in ValidatorProxy if required. return self.helper.proxy(self.base.generate()) return self.base.generate() Further, ``BaseFactory`` and related ``proxy`` classes are combined together: :: def create_factory(logg, only_positive=True): base_factory = BaseFactory() logger_factory = LoggerFactory(base_factory, logg, proxy_class=LoggerProxy, proxy_kwargs=dict(logg=logg)) validator_factory = ValidatorFactory(logger_factory, only_positive, proxy_class = ValidatorProxy) return validator_factory Ultimately, to generate a domain object, you create and run a factory method ``generate`` which implicitly creates a composite object. This method is based on specifications that are set forth in the ``proxy`` class. :: factory = create_factory(sys.stdout, only_positive=False) domain_object = factory.generate() Why do you need a domain if you can use decorators? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the above examples, to implement the planned logic, it is quite possible to use standard Python language techniques such as decorators. However, to implement more complicated operations, the domain model is reasonable and justified. In general, the domain is useful when: * there are more than three layers. In such case, the domain model usage facilitates the understanding and supporting of the code; * wrapping must be implemented depending on some conditions, including dynamic wrapping; * there is a requirement to wrap objects implicitly by helpers. 
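To tie the preceding examples together, here is a short, hedged usage sketch. It assumes the ``Base``, ``LoggerProxy``, ``ValidatorProxy``, ``Helper``, and ``create_factory`` definitions given above; the expected outputs are noted in comments::

    import sys

    # Build the factory chain and generate a fully wrapped domain object:
    # ValidatorProxy(LoggerProxy(Base())).
    factory = create_factory(sys.stdout, only_positive=True)
    domain_object = factory.generate()

    domain_object.print_msg()                # logs the message, then prints "Hello Domain"
    print(domain_object.sum_numbers(2, 3))   # 5 -- passes validation, reaches Base
    print(domain_object.sum_numbers(2, -3))  # "Only positive numbers are supported."

    # A Helper can also unwrap one layer when access to an inner object is needed.
    helper = Helper(proxy_class=ValidatorProxy)
    inner = helper.unproxy(domain_object)    # returns the LoggerProxy layer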
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/gerrit.rst0000664000175000017500000000345700000000000021524 0ustar00zuulzuul00000000000000.. _reviewing-glance:

Code Reviews
============

Glance follows the same `Review guidelines`_ outlined by the OpenStack community. This page provides additional information that is helpful for reviewers of patches to Glance.

Gerrit
------

Glance uses the `Gerrit`_ tool to review proposed code changes. The review site is https://review.opendev.org

Gerrit is a complete replacement for GitHub pull requests. `All GitHub pull requests to the Glance repository will be ignored`.

See `Quick Reference`_ for a quick reference for developers. See `Getting Started`_ for information on how to get started using Gerrit. See `Development Workflow`_ for more detailed information on how to work with Gerrit.

The Great Change
----------------

With the demise of Python 2.7 in January 2020, beginning with the Ussuri development cycle, Glance only needs to support Python 3 runtimes (in particular, 3.6 and 3.7). There was a four cycle transition period, but starting in the Yoga development cycle, all Python 2 compatibility code has been removed and only Python 3 is supported.

Unit Tests
----------

Glance requires unit tests with all patches that introduce a new branch or function in the code. Changes that do not come with a unit test change should be considered closely and usually returned to the submitter with a request for the addition of unit tests.

.. _Review guidelines: https://docs.openstack.org/doc-contrib-guide/docs-review-guidelines.html
.. _Gerrit: https://review.opendev.org/#/q/project:openstack/glance+status:open
.. _Quick Reference: https://docs.openstack.org/infra/manual/developers.html#quick-reference
.. _Getting Started: https://docs.openstack.org/infra/manual/developers.html#getting-started
.. _Development Workflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/glance-groups.rst0000664000175000017500000001031100000000000022771 0ustar00zuulzuul00000000000000 .. _glance-groups:

=====================================
Glance Groups in Gerrit and Launchpad
=====================================

Glance-related groups in Launchpad
==================================
.. list-table::
   :header-rows: 1

   * - group
     - what
     - who
     - where
   * - "Glance" team
     - not sure, exactly
     - an "open" team, anyone with a Launchpad account can join
     - `Glance Launchpad `_
   * - "Glance Bug Team" team
     - can triage (change status fields) on bugs
     - an "open" team, people self-nominate
     - `Glance Bug Team `_
   * - "Glance Drivers" team
     - not sure, exactly
     - Anyone who is interested in doing some work, has a Launchpad account, and is approved by the current members
     - `Glance Drivers Team `_
   * - "Glance Release" team
     - Maintains the Launchpad space for Glance, glance_store, python-glanceclient, and glance-tempest-plugin
     - Anyone who is interested in doing some work, has a Launchpad account, and is approved by the current members
     - `Glance Release Team `_
   * - "Glance Core security contacts" team
     - can see and work on private security bugs while they are under embargo
     - subset of glance-core (the OpenStack Vulnerability Management Team likes to keep this team small), so even though the PTL can add people, you should propose them on the mailing list first.
     - `Glance Security Team `_

Glance-related groups in Gerrit
===============================

The Glance project has total control over the membership of these groups.

.. list-table::
   :header-rows: 1

   * - group
     - what
     - who
     - where
   * - glance-ptl
     - Current Glance PTL
     - glance ptl
     - `Glance PTL `_
   * - glance-core
     - +2 powers in Glance project code repositories
     - glance core reviewers
     - `Glance Core Team `_
   * - glance-specs-core
     - +2 powers in glance-specs repository
     - glance-core (plus others if appropriate; currently only glance-core)
     - `Glance Specs Core Team `_
   * - glance-tempest-plugin-core
     - +2 powers on the glance-tempest-plugin repository
     - glance-core plus other appropriate people
     - `Glance Tempest Plugin Core Team `_

The Glance project shares control over the membership of these groups. If you want to add someone to one of these groups who doesn't already have membership by being in an included group, be sure to include the other groups or individual members in your proposal email.

.. list-table::
   :header-rows: 1

   * - group
     - what
     - who
     - where
   * - glance-stable-maint
     - +2 powers on backports to stable branches
     - subset of glance-core (subject to approval by stable-maint-core) plus the stable-maint-core team
     - `Glance Stable Core Team `_

NOTE: The following groups exist, but I don't think they are used for anything anymore.

.. list-table::
   :header-rows: 1

   * - group
     - where
   * - glance-release
     - `Glance Release `_
   * - glance-release-branch
     - `Glance Stable Release Team `_

How Gerrit groups are connected to project repositories
--------------------------------------------------------

The connection between the groups defined in gerrit and what they can do is defined in the project-config repository: https://opendev.org/openstack/project-config

* ``gerrit/projects.yaml`` sets the config file for a project
* ``gerrit/acls`` contains the config files

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/contributor/index.rst0000664000175000017500000000265500000000000021346 0ustar00zuulzuul00000000000000Glance Contribution Guidelines
==============================

In the Contributions Guide, you will find documented policies for developing with Glance. This includes the processes we use for blueprints and specs, bugs, contributor onboarding, core reviewer memberships, and other procedural items.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/contributor/index.rst0000664000175000017500000000265500000000000021346 0ustar00zuulzuul00000000000000
Glance Contribution Guidelines
==============================

In the Contributions Guide, you will find documented policies for developing with Glance. This includes the processes we use for blueprints and specs, bugs, contributor onboarding, core reviewer memberships, and other procedural items.

This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional documentation on Glance and other components of OpenStack can be found on the `OpenStack wiki `_.

Getting Started
---------------

.. toctree::
   :maxdepth: 2

   contributing
   core_reviewer_guidelines

Development Policies
--------------------

.. toctree::
   :maxdepth: 2

   minor-code-changes
   refreshing-configs

Development Practices
---------------------

.. toctree::
   :maxdepth: 3

   gerrit
   blueprints
   release-notes
   releasecycle
   glance-groups
   documentation
   database_migrations

.. bugs
   contributor-onboarding
   gate-failure-triage
   code-reviews

Developer Reference
-------------------

.. toctree::
   :maxdepth: 2

   architecture
   database_architecture
   domain_model
   domain_implementation
   Module Reference

Development Roles
-----------------

.. toctree::
   :maxdepth: 2

   release-cpl

.. core-reviewers

.. _managing-development:

Managing the Development Cycle
------------------------------

.. toctree::
   :maxdepth: 1

   releasecycle
   glance-groups

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/contributor/minor-code-changes.rst0000664000175000017500000001105200000000000023670 0ustar00zuulzuul00000000000000
Disallowed Minor Code Changes
=============================

There are a few types of code changes that have been proposed recently that have been rejected by the Glance team, so we want to point them out and explain our reasoning. If you feel an exception should be made for some particular change, please put it on the agenda for the Glance weekly meeting so it can be discussed.

Database migration scripts
--------------------------

Once a database script has been included in a release, spelling or grammar corrections in comments are forbidden unless you are fixing them as part of another, more substantive bug fix on the migration script itself. Modifying migration scripts confuses operators and administrators -- we only want them to notice serious problems. Their preference must take precedence over fixing spelling errors.

Typographical errors in comments
--------------------------------

Comments are not user-facing. Correcting minor misspellings or grammatical errors only muddies the history of that part of the code, making ``git blame`` arguably less useful. So such changes are likely to be rejected. (This prohibition, of course, does not apply to corrections of misleading or unclear comments, or, for example, an incorrect reference to a standards document.)

Misspellings in code
--------------------

Misspellings in function names are unlikely to be corrected for the "historical clarity" reasons outlined above for comments. Plus, if a function is named ``mispelled()`` and a later developer tries to call ``misspelled()``, the latter will result in a NameError when it's called, so the later developer will know to use the incorrectly spelled function name.

Misspellings in variable names are more problematic, because if you have a variable named ``mispelled`` and a later developer puts up a patch where an updated value is assigned to ``misspelled``, Python won't complain. The "real" variable won't be updated, and the patch won't have its intended effect. Whether such a change is allowed will depend upon the age of the code, how widely used the variable is, whether it's spelled correctly in other functions, what the current test coverage is like, and so on. We tend to be very conservative about making changes that could cause regressions.
So whether a patch that corrects the spelling of a variable name is accepted is a judgment (or is that "judgement"?) call by reviewers. In proposing your patch, however, be aware that your reviewers will have these concerns in mind.

Tests
-----

Occasionally someone proposes a patch that converts instances of ``assertEqual(True, whatever)`` to ``assertTrue(whatever)``, or instances of ``assertEqual(False, w)`` to ``assertFalse(w)`` in tests. Note that these are not type-safe changes, and they weaken the tests. (See the Python ``unittest`` docs for details.) We tend to be very conservative about our tests and don't like weakening changes. We're not saying that such changes can never be made, we're just saying that each change must be accompanied by an explanation of why the weaker test is adequate for what's being tested. To make this a bit clearer, run the following example, commenting out the lines in the ``runTest`` method alternately::

    import unittest

    class MyTestCase(unittest.TestCase):
        def setUp(self):
            pass

    class Tests(MyTestCase):
        def runTest(self):
            self.assertTrue('True')
            self.assertTrue(True)
            self.assertEqual(True, 'True')

To run this use::

    python -m testtools.run test.py

This is also mentioned in the unittest documentation_.

.. _documentation: https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertTrue

LOG.warn to LOG.warning
-----------------------

There is a steady stream of proposed changes that would convert all ``{LOG,logging}.warn`` calls to ``{LOG,logging}.warning`` across the codebase because of the deprecation in Python 3. While the deprecation is real, Glance uses ``oslo_log``, which provides the alias ``warn`` and solves the issue in a single place for all projects using it. These changes are not accepted because of the huge amount of refactoring they cause for no benefit.

Gratuitous use of oslo libraries
--------------------------------

We are big fans of the oslo libraries and all the hard work the Oslo team does to keep common code reusable and easily consumable. But that doesn't mean that it's a bug if Glance isn't using an oslo library everywhere you could possibly use one. We are all for using oslo if it provides any level of benefit for us and makes sense, but please let's not have these bugs/patches of "Let's use oslo because it exists".

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/contributor/refreshing-configs.rst0000664000175000017500000000554600000000000024013 0ustar00zuulzuul00000000000000
.. Copyright 2016-present OpenStack Foundation
   All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

Guideline On Refreshing Configuration Files Under etc/
======================================================

During a release cycle many configuration options are changed or updated. The sample configuration files provided in tree (under ``etc/*``) need to be updated using the autogeneration tool, as these files are being used in different places.
Some examples are devstack gates and downstream packagers shipping these same files (or using the defaults from them). Hence, before we cut a release we need to refresh the configuration files shipped in the tree to match the changes done in the source code during the release cycle.

In an ideal world, every review that proposes an addition, removal or update to a configuration option(s) should use the tox tool to refresh only the configuration option(s) that were changed. However, many of the configuration options, like those coming from oslo.messaging, oslo_middleware, etc., may have changed in the meantime. So, whenever someone uses the tool to autogenerate the configuration files based on the options in tree, there are more changes than those made just by the author. We do not recommend that authors manually edit the autogenerated files, so a reasonable tradeoff is for authors to include **only those files** that are affected by their change(s).

.. code-block:: bash

   $ tox -e genconfig

When To Refresh The Sample Configuration Files
==============================================

* Every review that proposes an addition, removal or update to a configuration option(s) should use the tox tool to refresh only the configuration option(s) they have changed.
* Ideally reviewers should request updates to sample configuration files for every change that attempts to add/delete/modify a configuration option(s) in the code.
* In some situations, however, there may be a bunch of similar changes that are affecting the configuration files. In this case, in order to make the developers' and reviewers' effort easier, we recommend an update to the configuration files in bulk right after all the update changes have been made/merged.

**IMPORTANT NOTE**: All sample configuration files must be updated before the milestone-3 (or the final release) of the project.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/contributor/release-cpl.rst0000664000175000017500000002772400000000000022433 0ustar00zuulzuul00000000000000
==================
Glance Release CPL
==================

So you've volunteered to be the Glance Release Cross-Project Liaison (CPL) and now you're worried about what you've gotten yourself into. Well, here are some tips for you from former release CPLs.

You will be doing vital and important work both for Glance and OpenStack. Releases have to be available at the scheduled milestones and RC dates because end users, other OpenStack projects, and packagers rely on releases being available so they can begin their work. Missing a date can have a cascading effect on all the people who are depending on the release being available at its scheduled time. Sounds scary, I know, but you'll also get a lot of satisfaction out of having a key role in keeping OpenStack running smoothly.

Who You Have to Be
==================

You do **not** have to be:

- The PTL
- A core reviewer
- A stable-branch core reviewer/maintainer

You **do** have to be:

- A member of the Glance community
- A person who has signed the OpenStack CLA (or whatever is in use at the time you are reading this)
- Someone familiar with or willing to learn git, gerrit, etc.
- Someone who will be comfortable saying "No" when colleagues want to sneak just one more thing in before a deadline.
- Someone willing to work with the release team on a regular basis and attend their `weekly meeting`_.
Just as the stable maintenance team is responsible for the stability and quality of the stable branches, the release CPL must take on responsibility for the stability and quality of every release artifact of Glance. If you are too lenient with your colleagues, you might be responsible for introducing a catastrophic or destabilizing release. Suppose someone, possibly even the PTL, shows up right before RC1 with a large but probably innocuous change. Even if this passes the gate, you should err on the side of caution and ask that it not be allowed to merge. (This has happened `before `_.)

A Release CPL has authority within the Glance project. They have authority through two measures:

- Being the person who volunteered to do this hard work
- Maintaining a healthy relationship with the PTL and their Glance colleagues.

Use this authority to ensure that each Glance release is the best possible. The PTL's job is to lead technical direction; your job is to shepherd cats and help them focus on the priorities for each release.

What This Does Not Grant You
============================

Volunteering to be Release CPL does not give you the right to be a Glance Core Reviewer. That is a separate role that is determined based on the quality of your reviews. You should be primarily motivated by wanting to help the team ship an excellent release.

Get To Know The Release Team
============================

OpenStack has teams for most projects and efforts. In that vein, the release team works on tooling to make releasing projects easier as well as verifying releases. As CPL it is your job to work with this team. At the time of this writing, the team organizes in ``#openstack-release`` and has a `weekly meeting`_. Idling in their team channel and attending the meeting are two very strongly suggested (if not required) actions for the CPL. You should introduce yourself well in advance of the release deadlines. You should also take the time to research what actions you may need to take in advance of those deadlines, as the release team becomes very busy around those deadlines.

Familiarize Yourself with Community Goals
=========================================

Community Goals **are** Glance Goals. They are documented and tracked in the `openstack/governance`_ repository. In Ocata, for example, the CPL assumed the responsibility of monitoring those goals and reporting back to the TC when we completed them. In my opinion, it makes sense for the Release CPL to perform this task because they are the ones who are keenly aware of the deadlines in the release schedule and can remind the assigned developers of those deadlines. It is also important for the Release CPL to coordinate with the PTL to ensure that there are project-specific deadlines for the goals. This will ensure the work is completed and reviewed in a timely fashion, and hopefully early enough to catch any bugs that shake out of the work.

Familiarize Yourself with the Release Tooling
=============================================

The Release Team has worked to automate much of the release process over the last several development cycles. Much of the tooling is controlled by updating certain YAML files in the `openstack/releases`_ repository. To release a Glance project, look in the ``deliverables`` directory for the cycle's codename, e.g., ``pike``, and then look for the project inside of that. Update that file using the appropriate syntax; after the release team has reviewed and approved your request, the rest will be automated for you.
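As a rough illustration of "the appropriate syntax", a deliverable file is a small YAML document like the sketch below; the field values (the release model, type, and especially the hash) are placeholders rather than values taken from a real file:

.. code-block:: yaml

   # Hypothetical sketch of deliverables/<cycle>/glance.yaml in the
   # openstack/releases repository; all values here are placeholders.
   launchpad: glance
   release-model: cycle-with-rc
   team: glance
   type: service
   repository-settings:
     openstack/glance: {}
   releases:
     - version: 29.0.0
       projects:
         - repo: openstack/glance
           hash: 0000000000000000000000000000000000000000  # placeholder SHA

Proposing a release is then just a normal review against that file; once it merges, the release automation takes care of tagging and publishing.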
For more information on release management process and tooling, refer to the `release management process guide`_ and the `release management tooling guide`_.

Familiarize Yourself with the Bug Tracker
=========================================

The `bug tracker`_ is the best way to determine what items are slated to get in for each particular milestone or cycle release. Use it to the best of its capabilities.

Release Stability and the Gate
==============================

As you may know at this point, OpenStack's Integrated Gate will begin to experience longer queue times and more frequent unrelated failures around milestones and release deadlines (as other projects attempt to sneak things in at the last minute). You may help your colleagues (and yourself) if you advocate for deadlines on features, etc., at least a week in advance of the actual release deadline. This can apply to all release deadlines (milestone, release candidate, final). If you can stabilize your project prior to the flurry of activity, you will ship a better product. You can also then focus on bug fixing reviews in the interim between your project priorities deadline and the actual release deadline.

There are periodic "tips" test jobs set up for each of glance, glance_store, and python-glanceclient. These jobs test our current masters (which use the released versions of dependencies) against the master branches of our dependencies. This way we can get a heads-up if a dependency merges a change that will break us. In order for this to work, someone has to keep an eye on these jobs ... and that person is you. Part of your job is to report on the status of the periodic jobs at the weekly glance meeting. You can see the output of these jobs by going to the Zuul Builds Page, ``http://zuul.openstack.org/builds.html``. (Note: it takes a minute or so for the page to populate.) You can filter the results by Pipeline (you want ``periodic``) and Project (use ``openstack/glance``, ``openstack/glance_store``, or ``openstack/python-glanceclient``). You can find a link to the logs of each job from that page. (Note: your responsibility as Release CPL is limited to monitoring and notifying the team about the status of the jobs. But feel free to fix them if you want to!)

Checklist
=========

The release team will set dates for all the milestones for each release. The release schedule can be found from this page: https://releases.openstack.org/index.html

There are checklists to follow for various important release aspects:

Glance Specific Goals
---------------------

While the release team sets dates for community-wide releases, you should work with the PTL to set Glance-specific deadlines/events such as spec proposal freeze, spec freeze, mid-cycle, bug squash, review squash, etc. Also, you can set additional deadlines for Glance priorities to ensure work is on track for a timely release. You are also responsible for ensuring the PTL and other concerned individuals are aware of and reminded about the events/deadlines to ensure a timely release.

Milestone Release
-----------------

The release schedule for the current cycle will give you a range of dates for each milestone release. It is your job to propose the release for Glance sometime during that range and ensure the release is created. This means the following:

- Showing up at meetings to announce the planned date weeks in advance. Your colleagues on the Glance team will need at least 4 weeks' notice so they can plan and prioritize what work should be included in the milestone.
- Reminding your colleagues what the stated priorities for that milestone were, their progress, etc.
- Being inflexible in the release date. As soon as you pick your date, stick to it. If a feature slips a milestone to the next, it is not the end of the world. It is not ideal, but Glance *needs* to release its milestone as soon as possible.
- Proposing the release in a timely and correct fashion on the day you stated. You may have colleagues try to argue their case to the release team. This is when your collaboration with the PTL will be necessary. The PTL needs to help affirm your decision to release the version of the project you can on the day you decided.
- Release ``glance_store`` and ``python-glanceclient`` at least once per milestone.
- Write `release notes`_

Release Candidate Releases
--------------------------

The release candidate release period is similarly scoped to a few days. It is even more important that Glance release during that period. To help your colleagues, try to schedule this release as close to the end of that range as possible. Once RC1 is released, only bugs introduced since the last milestone that are going to compromise the integrity of the release should be merged. Again, your duties include all of the Milestone Release duties plus the following:

- When proposing the release, you need to appropriately configure the release tooling to create a stable branch. If you do not, then you have not appropriately created the release candidate.
- Keeping a *very* watchful eye on what is proposed to and approved for master as well as your new stable branch. Again, automated updates from release tooling and *release-critical* bugs are the only things that should be merged to either.
- If release-critical bugs are found and fixed, proposing a new release candidate from the SHA on the stable branch.
- Write `release notes`_
- Announce that any non-release-critical changes won't be accepted from this point onwards until the final Glance release is made. Consider adding -2 on such reviews with a good description to prevent further updates. This also helps in keeping the gate relatively free to process the release-critical changes.

Final Releases
--------------

The release team usually proposes all of the projects' final releases in one patch based off the final release candidate. After those are created, some things in Glance need to be updated immediately.

- The migration tooling that Glance uses relies on some constants defined in `glance/db/migration.py`_. Post final release, those need *immediate* updating.

Acknowledgements
----------------

This document was originally written by Ian Cordasco. It's maintained and revised by the Glance Release CPLs:

- Ian Cordasco, Release CPL for Ocata
- Hemanth Makkapati, Release CPL for Pike
- Erno Kuvaja, Release CPL for Queens
- Brian Rosmaita, Release CPL for Rocky

.. links
.. _weekly meeting: http://eavesdrop.openstack.org/#Release_Team_Meeting
.. _openstack/governance: https://opendev.org/openstack/governance
.. _openstack/releases: https://opendev.org/openstack/releases
.. _StoryBoard: https://storyboard.openstack.org/
.. _glance/db/migration.py: https://github.com/openstack/glance/blob/master/glance/db/migration.py
.. _release management process guide: https://docs.openstack.org/project-team-guide/release-management.html
.. _release management tooling guide: https://opendev.org/openstack/releases/src/branch/master/README.rst
.. _bug tracker: https://bugs.launchpad.net/glance
.. _release notes: https://docs.openstack.org/project-team-guide/release-management.html#managing-release-notes
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/contributor/release-notes.rst0000664000175000017500000000466700000000000023002 0ustar00zuulzuul00000000000000
.. _release-notes:

Release Notes
=============

Release notes are notes available for operators to get an idea of what each project has included and changed during a cycle. They may also include various warnings and notices.

Generating release notes is done with Reno. You can submit a release note as a yaml file with your patch, and Reno will gather and organize all the individual notes into releases by looking at the commit hash associated with the yaml file to see where it falls relative to branches/tags, and generate a single page of notes for each release. OpenStack has adopted Reno because it allows release notes to be written at the time the code is committed. At that time, the impact of the change is still clear in everyone's mind, and it avoids the situation where the PTL is scrambling to write a detailed set of notes at the last minute.

You can read through the past `Glance Release Notes `_ to get a sense of what changes require a release note. If you're not sure, ask in IRC or at the weekly Glance meeting. Sometimes a reviewer will force the issue by adding "needs a release note" as a comment on your gerrit review.

A lot of people who write high-quality code are not comfortable writing release notes. If you are such a person, and you're working on a patch that requires a release note, you can ask in IRC or at the weekly Glance meeting for a volunteer to take care of the release note for you.

You use Reno to generate a release note as follows:

.. code-block:: bash

   $ tox -e venv -- reno new

This will generate a yaml file in ``releasenotes/notes`` that will contain instructions about how to fill in (or remove) the various sections of the document. Modify the yaml file as appropriate and include it as part of your commit.

.. note:: The Glance team has adopted the convention that the PTL writes the ``prelude`` section for a cycle's release notes at release time, when it's clear what's been accomplished during the cycle and what should be highlighted. So don't include a ``prelude`` section in your release note.

Commit your note to git (required for reno to pick it up):

.. code-block:: bash

   $ git add releasenotes/notes/; git commit

Once the release notes have been committed you can build them by using:

.. code-block:: bash

   $ tox -e releasenotes

This will create the HTML files under ``releasenotes/build/html/``.
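For orientation, a filled-in note might look something like the sketch below; the section names are reno's standard ones, the body text is placeholder guidance rather than real note content, and (per the note above) there is deliberately no ``prelude`` section:

.. code-block:: yaml

   ---
   features:
     - |
       Describe the new feature here, from an operator's point of view.
   upgrade:
     - |
       Note anything an operator must do, or watch out for, when upgrading.
   fixes:
     - |
       Summarize the bug fix and its operator-visible impact.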
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/contributor/releasecycle.rst0000664000175000017500000001251400000000000022672 0ustar00zuulzuul00000000000000
===================
Release Cycle Tasks
===================

This document describes the relative ordering and rough timeline for all of the steps related to tasks that need to be completed during a release cycle for Glance.

Before PTG (after closing previous release)
===========================================

#. Collect topics and prepare notes for PTG discussions in an etherpad. Add a link to the etherpad to the list of PTG etherpads (for example: https://wiki.openstack.org/wiki/PTG/Ussuri/Etherpads)

Between Summit and Milestone-1
==============================

#. Review output from the PTG and set Review-Priority on any high priority items identified from those discussions. Send out a recap to the mailing list.
#. Add any Glance-specific schedule information to the release calendar (https://review.opendev.org/#/c/505425/)
#. Update the ``CURRENT_RELEASE`` constant in ``glance/db/migration.py``. Include a ``Sem-Ver`` pseudo-header in the commit message so that PBR will increment the glance version number to match the release name.

   * The value of the ``Sem-Ver`` pseudo-header must be ``api-break`` (which is a little disconcerting) because we need to increment the major digit in the **Glance** version number (we aren't signalling anything about the **Images** API), and that's the constant that pbr recognizes for this purpose.
   * Example patch: https://review.opendev.org/c/openstack/glance/+/827919

#. Focus on spec reviews to get them approved and updated early in the cycle to allow enough time for implementation.
#. Review new driver submissions and give early feedback so there isn't a rush at the new driver deadline.
#. Review community-wide goals and decide a plan or response to them.

Milestone-1
===========

#. Propose library releases for glance_store or python-glanceclient if there are merged commits ready to be released. Watch for any releases proposed by the release team.
#. Check progress on new drivers and specs and warn contributors if it looks like they are at risk of not making it into this cycle.

Between Milestone-1 and Milestone-2
===================================

#. Review stable backports and release status.
#. Watch for and respond to updates to new driver patches.

Milestone-2
===========

#. Propose library releases for glance_store or python-glanceclient if there are merged commits ready to be released. Watch for any releases proposed by the release team.

Between Milestone-2 and Milestone-3
===================================

#. Review stable backports and release status.
#. Set Review-Priority for any glance_store changes that are needed for feature work to make sure they are ready by the library freeze prior to Milestone-3.
#. Make sure any new feature work that needs client changes is proposed and on track to land before the client library freeze at Milestone-3.

Milestone-3
===========

#. Propose releases for unreleased changes in python-glanceclient. Watch for releases proposed by the release team. Include a branch request for stable/$series creation.
#. Set Review-Priority -1 for any feature work not complete in time for inclusion in this cycle. Remind contributors that an FFE will need to be requested for the work to still be allowed into this cycle.
#. Prepare "prelude" release notes as summaries of the content of the release so that those are merged before the first release candidate.
#. Complete the responses to community-wide goals if not already done.
#. Start assembling cycle-highlights for the team.

Between Milestone-3 and RC1
===========================

#. Add cycle-highlights in the releases deliverable file.

RC1 week
========

#. Propose the RC1 release for glance or watch for the proposal from the release team. Include the stable/$series branching request with the release.
#. Finalize any cycle-highlights for the release cycle.
#. Remind contributors that ``master`` is now the next cycle but focus should be on wrapping up the current cycle.
#. Watch for translation and new stable branch patches and merge them quickly.

Between RC1 and Final
=====================

#. Propose additional RC releases as needed.
   .. note:: Try to avoid creating more than 3 release candidates so we are not creating candidates that consumers are then trained to ignore. Each release candidate should be kept for at least 1 day, so if RCx has been proposed but there is clearly a reason to create another candidate, delay RCx so that it can include the additional patches.

#. Watch for translation patches and merge them quickly.
#. Make sure the final RC request is done one week before the final release date.
#. Watch for the final release proposal from the release team to review and +1 so team approval is included in the metadata that goes onto the signed tag.

Final Release
=============

#. Start planning for the next release cycle.
#. Check for bugfixes that would be good to backport to older stable branches.
#. Propose any bugfix releases for things that did not make the freeze for final library or service releases.

Post-Final Release
==================

#. Unblock any new driver submission patches that missed the previous release cycle's deadline.
#. Review approved glance-specs that were merged to the previous cycle folder that did not get implemented. Revert or move those specs to the next cycle's folder.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/deprecation-note.inc0000664000175000017500000000012400000000000021053 0ustar00zuulzuul00000000000000
.. note:: The Images API v1, DEPRECATED in the Newton release, has been removed.

././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8222988
glance-29.0.0/doc/source/images/0000775000175000017500000000000000000000000016370 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/images/architecture.png0000664000175000017500000013625700000000000021572 0ustar00zuulzuul00000000000000
[binary PNG image data omitted: architecture.png]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/doc/source/images/glance_db.png0000664000175000017500000032274600000000000021002 0ustar00zuulzuul00000000000000
[binary PNG image data omitted: glance_db.png]
)?|A^E '7S3o#QI̞8'<- : ^MkV@T3Œf@{GJ^= @HȪRoD< ݰpɬ*ڳqG@BO}.Z5'" SՍɵ/jYީB.b[j> <{ݭ#$BAq3WIa c4neM`rn̼I '=׾A-N*im١(,8' j]xGeOA:pX \l _[v t@.<ضsc;_{p (B9mC9]t fϱv HGOʨ4@Ǩ;:;:r$y˫h# Y:17&S-hnTs#’t@z@S{_yHVIӕu@[VG$GX6xB2J t@"ͼ޷RdR3fyEӭRyᅻBH8ps Mum^mN~h̛n֑[0 $k`dMM+$$dUH,y$(*.Z?s欨*+qhŪuy&IH8p[ M[Ghe]q=aFg5N 1yCFRgտއQTj @c)| IEV7o{P(9^Cs 1oAݠ@=c37pN:^ y 8ľq#_Hb or7#v~`鉁Ҥu4t.$~! @Bbf:/28aˎ 7X;C-\sa+({T-j . bB_iJ& H9.˝NP=u ޽0I#9cA hY/ZQz8FH#[Gcb߰'y#S^3-r;uU"j_ӝ[?A% nA)WF |r8wK FH8pe M[G=Άu@~ɵ a҈"/S&s@R=TA8FHSuD.1}#1.A5e&8HcJa[GC.фK FH8p M-SpDjQ{Ve׈KdJ-~5| cir豍Gj~=kZ'Ս\hyޱ#uVMԕ{g]:?w=QZ+գؖ,۸y/A> $8p%:qLN$rڈ9GY7 @# ( T2pO~~g}NZ7.58mΜyn!$[8 Hc0MtUVT;S}؊WM9xKʫR?jUQ)'$ (RR?-}3'bQqeiy*@Qh"M$+OIYxh v UT2ͬ!Qy/P!*8t<;OB$+/8Op&id"*+(yuoGD$*\W$?#iQTLVmKOWplvpL6A?-[ZçN֮Aޑ.\m]@ꁰLIVY[YS?pG(JkHPΦnd $1E/A|'A\nˊUkM}ToG# $8pt = n9? OTj1peatmbo{7m7m{SK(%ňw%dv"(!1ŠsR ;X[!WX ;E:z FH8pu `u4-X /Ӱ PNxA Hx8N].Fiu$0p1@%jxKL`M# $8p&"{O(%yD? oa clKL[GcЬ a coM[Go8HL>{(}0` c 4IK u0y3u4:Q 8FHխ呂Eu4, ;1p1:@ڲukDB4uT㭣o_M-òWZݻ  }L>a0uBKAI5uc c@??~G?[GPn%vBm4Zv]BBB~~~UUUGG|b c@o߾t;;1Xj(77_|׿+ $8p H\vvvߔϟ_8礠hTSSݻ?s uH? 0e8p0 IuuuCC}|?kPѸ=8@{lŷqJ[oA_?OK1;`, J" |r34@~an>{ߤ8q, ;] Imٺ?KIʤJF8XЂzy?8G=y% N|5{i{8pY@֭۠6=qze^u\N" /1mV~o ;u8ǨwGVWN H8pL2 9F ⺺W^}y|?10p4@B S1p)rrrZ[[G I$4Nl!iN,'42Γ0p@`ciH*;@it NC h0p@@R0pFeUܣՎH>I,1r舸S !Ie8PˋgJZ,K8B'/e3RҁWP/cs[ . rnY;[ɵ}3f=$fK{`\sI]$K00p@Bay(fxoo= ][f"vV\rxe]8O?p3lK|)qܷ_j-z/8}ӭ.{ [b~5i/ x2s=#r,\ NHH8p` -o.z e10 #8UtN3$P;%Xƌ ^g?=GEtmP(k \oM}Mr·9¿&.@hj PdC@v| ԏ[|7( *T3{H|Aq9 /C Oa$} tv%? [S_s="aT$1B{ t9y` }{Fr{ ;~x$ VؼΝf,9v@ι54A\ȶe55=_p1Ak皻D̚=g!ro3" t9y` JB%2h8%@ RCyk υ#v]ôsROm@i(y_=Iͷewo#o]}|E;"_ogD3dlE/[|7flz/!_N (;{k~!k!;f {7_~llA 2{.1da}|u0&qnй/)~a:%eh_RF -&8 ȇF@J@#&@#i$zu FoƓF́DG~@bJ#z 1Q4b$:1FCHHSH}Iat 2 Ɩbc@Ky4<eM#VQ`@S ʣG)hT1,PѨpa Mz 6La6ܗڰ!~"ec@v FBd~H42uMܬ@@b:ܗڰ!~iXoAZLq,H44ޏ_mͺˣ!GYm/L N q u˰bc@&Z?ǧ<FnehXk 0Uˎs_RŵF͝;mXͺ778 =#oReSwËOy-C2 YqܚuH7qy  -bc J+? HG&h9ִSk/Y9!Eg7gbN(".CWqZ HŦJn"6hl2 a Mƹ/HH}:Q}Gt@bljZ^hH ִ-_f]u_MKҝf.䆃Ե @GY4*y7ryT'҈DwԹܢ*Rc{祤?)*+ Ez'b2 NKH˙8XϘ1󈠨ntb&{zu@#`]@""YF~%G:z'C4uIʚۺ@ѩGdHNN*i;Eukx-a1K4ST:zF%͈FZ)zFv,H 9y=xb?Fa-5 1p@F -bc %Zv8˯@7o $ cʪTA-ޖeO0%K%?+;oޕ|"ZHP :b $}#Y,uv88]C\}֬]O0V񍛸l]||{[vAsgja9{IES GWp̬]X:{zƜ|gzT7rrY98{E%/[*w Ry6xy4uH8qb HW! ͚5@sJVQE_wYc'^ɕus L& EU-kg@m;I 9$'ԯĩPCs9%uYWMiUpL٩۹{{)4. 4uHH81FH^~GɛFP@bΛHq1F ͘13<6ne ѭq 3G W͚5 =け):o*#R#(*B@D@2Eܷ|X;'5&.{ukR'l&HZH p`DI -j'@zI'%-;goЈh!~R,:zsAcH2eu/gEU-6+kl_pm^X>8“Y 8F܇HcRPbV9QD/&k{02Q}GUcn޲Y~LTzەۤj87m,S '(*CQU"Vw7e-̕NύGGYHF<@^&1IP- DIjU4&.\[w޼tn< Uʫ@8ijf"i&uģxdfRP<&X{[׀fج@60py?M<&c  GEi4ae3aU#-/߿c|'K]kO/tzixx-kxSq4l}_vk _Z\Ѓiˣ> FHTUƢEl!UvkElqt>f9ΝDς"LJFHt10:SXNN|vO`dۿ]~G\RcR)aqJbzUy i휽#r̄jD\@z38[Jh|}9q)Ӏd$SUB׬su?+0Z^I]D\Q^Y3RQ:F2 H57=QgjM_[HDB%.$& dzHi5 O:f$% :{ֱtG%9 mǶL{ ɸiiu nQجTv(,dsazqwyUuѕۏ_ͫCR77Fi-H=4Zvym߱ P3g\YeK;ߠm=Ll=#"ܸ)ene5Xy"+KHŻx-Y,69#+juH h+kЩ&$:mZ{X;ypںz^rg$¢{O8 /(u}Hlw\d_H汕gG@̹˷׮ 'sprY88z@26.$ k_'d?tYruBV(9x n=H\FǓqUVHѱKJj_vǔRbfiBF1#?kF\<4G@gj3cL~aE54w@#: ~Ј&.JXHMgpN~TO%J(k%>GQ:c "mXPmp:wll$dUሪA|V e.*Zbʏm}n='ecx @G2ԹKH Ə*"gc{HUlrvBzޜ9sH ! =˯5kvH)&fP'1 RBz7$^~!$KbfQBz!j7Aޣ?/h|iKEc7$a\5$PN;pRGq5J 7n dGΞ=30T[&{ / 唵ok_fge5&# Q҄]@z@<ħ}#PQO>eG8+>{ Hs!'sn;ze3^ܑ@b{H,T4gu6Z:X@AQ>!GEIi" ˖XH ^.]Hk;z2.;%%D4s,{P$KGߕ֒:WMƍGHL슧RoHA@$eo (3<}%>5s]b?͟ojnD  Lں}!1HFɘY]Sr8Fz ms hyAzwHD٥~ɵc/{f"e=w-C.Ҕ K48:TPÓ}CG$[܂/F$tx%1:ҏƱD H@ : 9߀rB ZH l4Wl1DMS燁r/TKTԷT6^l~7ԾjDyeG=ƣ݃bҊjߤChS^@ޣܕ{. ߷Yݕ4Fŗ#$(" Ws4 )STgeZJ ub;RM8u}0|8|hUBT<;EuNHʩrm۵m篜\R7! q su 9H! 
Zb 1h56|㞵;G6oizqLTG˗a&:D?qUs1ެYMk M7-cdDQsW]Tľ+6QB{Hkh4lR<+:=8e5J6Hhݿy`5v~8+@ />LXlMG/0FHضm¾;W*B@ۦ\a44 ؕGc:|Fe l$6QeJ $M@_O+S& b: D0uکnP3f@"z^GcoJ$L#GO$dE$d<bl\b0$6s ^A턱 C FWy&\\>!;ٸNvk:rF'^6n!"@a~;P{3" qnfɶxG8н'΁'\v?t9·*ĢV K.dUt1_X (uSR9篙;Ƅk"ґFmԵ1s$F;vpGbj#Woߢ2r]euЋѹpцTWZ-$*%".!ƾ"䩽Q±[P^Ķ;ix#mMHH*C1TїsW pUH74&Z$i ɲ $ԯoHZw͡*"Ev)H:ԐtYUP5rhOH *H*Ocl֭]*(?AY$ҦyU/!7pl K%dT|ƌ9T " ]#Ec~d\֛Ϭs9{/{ÏZgrɵ/+zu篛 h螖Uzf jHK@@BJA@ !ΏWNF/>Ci=:*<#|.7# ѽn= H_^;iyC|=MSURץx$%~ŵ lQ4z|S[W$-$e(RBڻ&) ٹŸ˖\MHLzm\BTKoxKNY TotQ] I+W IH-b[lқ" G{TD5 eb#$kHXw VAH.~q~~1/Q<\;Nv`1i^` HΩF I+vn8l?7Qi4e5l MGCRd"1ʥ\" EqHp@Uqg(4$d(@b!9*↷˖ !D?w#YN$C._I'! if|hyo~I 9# єiM'UH93g2w D1Zr Ҍi @F wBHt@rpG2A@n% $O=C2AHU^7QE$p>xHB!1c= Ԁv$sDH #GR!>+P&Ae5t U@3f4:SL=8kKi6Mƣ@;aHR`1ʥ#ZJr 'Ǿ 5H+.CnA[I嬁T3@8tqݾr*7mEZJ!+UGp -⥏m<$ºۺF̀$)v#귑}mtE^U戨./{=&542LXzxTS[6տntQQA7Vw{nQ =S|@KnApa ;y" E Lz?|7I! ]Ǿ?H* I^jv޼eG1{Ƀx ~@[(| '͢K^ g̀' &1q/6pl^ιcF NάxDe _q7Q xj1& uOGTYa MAʦ,;)U/nTeQVY[AkB"%^ؒ[jPdd--Ar2J;UVeTNb~sjQ;) O_1t6Qf8{];O]<ڸb9Ml܃hM/@bh:Gģќ11U6Qe5݁4}|Tl܍쭜|*{UYctYvX@&C*` Y`:Z:R;u]UAa#5˛˧SAw4)2` $=\1 HCh-\?Z:[g-#(ʚ=enB6wo;wWH".QD5Fw( 2MU|;۬caeh0d^A۬: ) &Z!_Mk G yLL }*i_V_4=xT! %_XTΉ7AH>a)C">qH#у_pSk'H@h6H=wK{w[goR,IHVaH,i9)Ytz111c&nhl˭OĥeUSA)z'k!:w5(*[x"e,2hEkHAi+9HS+rY鈏"F( Ji;dnﭢ'$&™HH>o=QI Y44fl! FY5;KIgeo8cFHbRYs@H+PR9zg:BZ"8ؽHH Q VYGjdϾnkw/.߸HWR4uu RiV~qNwtR(EUm'Ԃ]v/" "%$&)xɲ,QP;{1) Wg7rv\d_lytuN#K{`k׳@Zak{5Hbx3 HbR *g( _K#ӧCiJG6N>f֮|#ᚮfv".$& zM@RG7c!M-]l6U K&D1 $ػ;\jMrsfn=7w;d(-wI⁹+rH2fɲHT Q2l`爈Ko@2F1I @&[>k즗Rl$e%dyL.5kV}$UAuKv'ϳ-+kڲ t-#]? Z@ cak -HVH>ݹ)*7.n>:ib5Fg5#SW^K=F('Pbu9@r uh~#ĥe@WVNxP3tqgݯ#]v|.m*b߸)(:cP ŦHʛz%mcKn]N5j=7Sc\{Ӕ3k{7M<|ҊZxY{9[j韅'"Xy=p ;ǃ luO˪M=ٌHt)lʦˊ@&iy@ןFֳs0R I#7.24t~ٵ{&d=ʚzPUE$IplШ 3hOoOG(qy7MT1Hc-+wUAQg.i"Qr@Bw iq w7&{PWl똺z38cػ"fD<=yQ9hǑr.R@HC< \[!ǦO[Cc)ޒ@YzW4! G )gՠ$FlF dI'({y_P H˩h;Tp"2749SI ghɫh_z-OwH"|+Z}"@'xYb^[t M=F@*m|p!^:LTiJ-,5kVueWg@#rm  &Og((mXkz둱r7Mg?PTnEqŧ}=\Y9@V.#e7qYػ{PWo@՛HTcoE2^QœSZf+Rʐfcc;?$_2&54DT7w5x?RZ_)je*#3Ql.{\2f&ܪoj>ԝD3NU/Y}fQ# k_ݷ=*moTHE*u5+`-YUtI-ɬɪxCmW˦{FIx[zl:ͺ $l[7uiI+@H#=ڶw5mD\\7򨒢 d:LT-/QDd4Qڲ #[gʖGZÒWNiLTuN{`N<2$́XN<2H@0j&sҷ hzc H8v"6O<Pu12QH}ڡ.H>}*ځU:&~UVH< H,hT\;Yy,i[riVԞ]4ʜ@b]c3yeGOmKFz'N+#yh&)CXǥJ UP9vTae+()a/ r i25$: Y1IwM@blyD) I6d< + J$<+K/fT'Έ\o:K,z5&D*mS 9 e$;&pΌ| *:a)ZzRu C4 I۸4]! "u]RAn>VJJ0XOOyfR GZ!$SkaIYC-jX遹[lf5 YhD'oph qn揶,S= Ԙ$K;wwpиKbOL/ojL st\dYdb6!>EMh?@2f֮^^1YT7wxg5^Ԏ*#PsONY/ʭG$SK7^T88zԋK+ ɸE[:-Y #9sGhJ~ז]˧9sm޺Pdj6{Q){mB6plz GE#-^6e6MIea Q4h(j+\-X4f $ECB$6~{yDȳ (Qep B2#,Ss]&f6P'i[mkOMKȄ6ڔ#8$T2uvËH,P@?k֬_LxZPsyHBb3@2 ~mفdb S ;s֬OH;IEuj_sl JG!ECs*mA!d'`]|058Dt)yBxuݨpISfUVHF=Qn H3WEvTTRNRV?"R_Ah8Жzwh@k8ruOuߥƬH3>""~0%tujƌ䔵BABaICQܪy씀kI ٺIȪ Ij$d" ]oye\A? ʈH*:#\{a@l>RqGxe->a'~O_0<̙+Ed$ɦZ7Ï?.[%a*!4rV IP! " uknUD\# D*EE(i Yg Ӭ\|©@bTs 8'H N/( %Q? }vwbxD+<|2UEP~tvG%ƣ)I8/(u}仵qbQzظv1 C8%˶di@TF%͈AVQ;Mc#PkwnYQ`L6<ԅ6n95" *W SH {O-<" i>HBs7,b2%Kۺ?Zx{p*;8掁QUZB! Ź?y9rؘ9y=hFPLOH%졥".IH@QG/_j` ELeIlJШ'8jZ:slLE^IHH2 40^ ^:l*e)$. ^@A@r2. db՚*i [%ft ofغ#]g1bwLN""!u ӆCH\uyY T>e.;,D쐊brTIkM 8,DdAmOfg4de_0s>úwBCmHH6|gT_7VɃL/ydsISO6$ߗ,]j" #JO)ϫ!ѩ_@)>A1H2ɤ3F^*;>ИtPk㣬w]QA@tu''$!!TPVCo)fIq^IWZMW_޲y3'eӞ[HQtuV}xEUg C)!-{ouϒL*73ע~`wo$BZJjނ0Ge6]C--$I |%0")6/ҳXEezwxDaxZ47A?tDva\H zHmfi'~[oS%ߚz3_M{t8]  L-lQHP=چto)$g0<hM($tGY<0=Px6B<)05nxYB'#O\EW^䍤hmzu-]u4@H@2/`I)*SW}u]AH7*UhhFMm=55PH<媗!Ù[ۙn!O` 8IK׈ҧ zIKT"$' >SVw@nG=ytэzPq.80Wشba4݌״䱤e@R(s3Ŋ )A~o6\zd7E|I;:?sg/ .V[uxS)'{NJBjUku^'/BR g^?n&i{%ԚLJKT\BzU65v*XvY|.8 V(Sq@yG+ l]ɢ(qdB jr*{'%(?x!=Uhj [RxKT9G_!uImfh%t݅ѦG6)Ί&NN}h]רUUG#lKT kZnlWJ[W=XtU[=PLu[H|mc XOU-Մڈ`$eמ()lUƣQqj\}BJj{_޵ϚR;Jo8yUv0sh3/Q?%ܽ /274*G[4 Bvfk1xFG1"3ԊSI!V ɤʁ†ң. 
^MW~)$%f|=(*M RBħI"!AX26*퓲ѳ #u($^N!)X7݅4)ӣ]{>GKVԧvG%!1"2\P1fl8NɟQ!i,rW  6/$dv?#&n>"/A(`b yniG- /1if6z[v;Xw5,m,<g LZƤ!2u#{| `Enߡc`kWid8R5uͭ lFA]Z~]}k BxF1fos]u7o5biY ٹ狤jDzixk:G?1M]/"}[xv(&jM=m{Zo!J|BKN!nla{ smڭk|2җߝoRjz \C!)XzPb3_ I3)G<֭- GTē37ՄLQ#/$)L띻~2cau6m I!QE!] KXԢw x7ߟ O,RMLCc}ff@fzm;R@H2ĉGH|Pt_XX T>z?4 ;lg:6d颫[.^ªΫp l>{NdrNiw`\NY;sɎj^[m픣g`beg]"=mZ&!.Twhb}K=6Dlݾc.zW?_)}"_Gˉy ^Krh!#9;IDpwh:/$Y!F 遬}1y\G]⥗~wgnt(!IVz; ڻ%^)X υƦ#&9W_z{8z<)nI٨BR}A}ͺΛK]ZHHKͭJή`(1j鿏n6$ADj;j"Y)y5kk;G19udKHnQ1}whUBV|`E-3k T(Y# vBJ-kj-Y*&B:`UDHؼUX/ռ5/: Cږ" O 0⁓DV};:xLTxINbFģ(xұNҝJ-O\ xt;ʌxN_֧#:*u `:<Cn=|W_K&mPH)NC %n!-_)#u@O`Q^Ys]y>%$f7:pH  !Ԅg^a ΌW_ë®SY(f1X| ìj\}É wpD)RR:F0bZT02C-$%.Vs {@ !%0B*w B*dmVDHEyK, Y ޑƎ)".@E<RpT2D<"m6(4:u oh]" X{]ȾUx #^]xEpٔO@"#Vo.!C!J !$xeP ($c3yB惍 0*C! GCE<9Ԗؗ_~yފm&B:V}?xڮan !a7Yۼj7Z^#w,]NP,F}}JHri5ToњuPߗ$Y'omT>j֬蓵FѩTQ7]r;tUo(dGI$κ!Df;v`q$OHa/} Bbߤjl4!Ql\leZIsSGӣ[]HdD^AO72o2RۉatE0w]f#N\"BRD0%zL&<1KTBS_UNMk! uI{AC5ցe1?6!c=Tc1"2tU^kdD<勂OET)DGɫ+#!^R^J[n*SYޓW3INAy_fV]힪<6bbr+6۠x6s/QUƣ'sM_!)s=UxO1j06.'qӾH)p>&D# +,ԄG_"4zVߣ Im$Ƥogd-a2%XW5Z9ӣ^) xJ\[=ŲatB>b/$l#^H JT;RLKޅOvwH2>!ŵ=*]&lpxWu)^sʕt?Jx’ujN'aLu<Ⅴ=N!AyDmYڨi>YOwPW`#$I9Yy! $vG[HR'aEVH.a4븅`yRB>;&uam3SH{,_UBH.S INBEW\)N| `2$$K=+&GPNy'&?:}06E˂i# e#y':o61bf/+$`uG:wb{##)yKm0x:{c#i~ܤ]RN{WRSN.0@dZKR|Rhfi6:B?GV#s2ݏJ?"Jez[H(d2[NHԢVrx#KHԗߝۤgTuՁB!Ѥlcƽ 2uLaS~R0=Ԣpz 6*4=h4".?(7Lɾ7657f^4Fwi]0bdj(Lzozo]vXRݏGNPm$bu6°#G]txb)Tu}D@6m2?&ĤIu?2ϒctQ\g#](o*kY~du1t‘$ ߸L'8q _*?F? UBQsύ_H="g]6l'+$FG!MhK|B d$>\g&<Rz~mZ^5KHCv1rDPHI&fTܳ9yfCq㞉ypl68mc*uKtU>IБF_MǽL6عOei{`ɱFfV;X紳DIHrW;*7Ʌ-dajhjix[VH564 I{mT;! ށ!:NZDq9@>=}` ^DRkج6-$V|*:E/wa%KO;H4L@H}"Bo.;Q2[@¾/o̍g N*bIK%Aܙ&Bb Qon!BLٽz&DHDX{,G-IhSH  ߒEmt#hGqX(R 龛QSXKTm G{.hm TESHXQSB-&B:*[DFB,Ϳ 1/c)=z::L|Rĉ/02pSUδQ**֩k[#u *ZW`Kob^w?9{1xE$JH!ɘ쨎8H ݾØ쎞pڼ*.~Zn!EqOb|A &FL!)VN<#_źᄋ),jέAZvpy/$)!I2p;E&[s* I4oB]?/1뽙aT79xuy$\)5K!$p,qs /8p &1i`QdYB$7wٵ-$al!Iu?zon!acQ ,Q%BGx$NKgl~Ľ G:dא($Ap,gC!I|#[1ɳxӧ0d=PqlGĎ{x8ބB "Y6Y<; ➎b!g±""RXHL!16"frp|WH*b^"B:nޠe8yt!1vq;< +ZB~u㐗!;0( cR 3_ezyzo",[1"1)?($lnl"42uwXH(t?$[%eqKDz$ެU"$_&*AHG&B*l݂)x!q)$*ݥ I=xsk=L!aceO|&>NakoL|mDH2aYbRW5{.Z#kd W`3έ;$Be .KHTS~/!֏l )06lTXၸ%XU'Brzs)ͤI^cZNUHWz?⍻~ȅ,^GMr_ê'~2{: v%nYpHzkW G`QDq?^pG@f [jX3[QDHݏB7y?H~L+`]`ڻW?*s޽Wߎq2;$>hb#L|($: sb#{%a'E&{U@8щDHtc ^.{(${=c $:B|a KRQH6e,jTF='PBQBv ( Kc)$'۾)!IC!y`.\ +-DHyuT;s9`Z )A[{G:+؆scDƮ[mͨ)QjVu[渞R[R{<*mj9ṽI,Qe?bJ[n'.Q,ͫ=ˬޛt bӥ%NT:+ PHM̸C=oZHoj"Sqoι!N. ĉ5CH4BBB!-z *p)2jm뀒BI%5NT'B7AK)j'BrQ9̶o_Ⅴ@HOD[i zJT-QQ¢ {6t]YtS&uI' lVy{ kzd q&*J! LbKPQ>&E`:4L-jUҞ!3#"NrtMUlYHrl\(Qm꽩J﵌뽕H'Q*k#^H|mգT&\!=Q*<ƣ@TRSV܃V'ԣ IXt`zsڃǼs#_;&j:n4U!T^uJ.vݩj%ҦVhuSJa5ͅO^X!=SB(SYx7*?=f6²G +5"UW I}*;NAy}_腯bvW28zܑQt)QvwbT"3QMmx/kD6݀GykZT t̫YR~k ($OH ? IW~NȰN‚T]]Wć%hdr+:5._}[-v1 kd :eW tҴYGX[B!.P^H6£s3{RZUZc3psE$SnHWH޵z;OY&M^H̽ ]}~#%8pFs5,vǦ )?v2063jԴ9fOݏv+u5,deLϯ6"l|k匓7"!:ɏ {0%aw ,>sS^H 7wl+ysnbNsXGS-}!$:V͉[&KH%fw*$QFi!Qo]p9xgG>df3BhGy!/6hl I!2c Iݑo7b4y_۾_w'v)F[o6 NƜbez 3(,ddq`y*]\&1ͷ6CpSͼj\0C9~!NlT'k]"Xp q'Xe@aC:[u8-t7[ :(& Xr8ypŮ}-or_K~u͞z&?7.+1L~8/ڍ&mQYX[^FfZP?qx.zxwrgi϶gܯci7 ۓ߲zN+CǶg69oO=lGH+TcߍM: $vGr:{-KviIV1ց0OJ.WC DHiyU)92ȰB!5)RG * >i ڹ dɎ,*/ #XaNJH]~imǤM^GudRۖ6I $}q7􍨶od aQ% _|yM}-hlKB!m1lj),pcRZN̚=MX 3[YKvԖKq/62ۭ~#+ &*8& 9efKBXkLBb<:Y-Fy6ˠ z6RT7ERMn'fSwTdw ,18kH.}f%$҈EFdKH"Djg[vB+?$b@N)SM̮L-#W{/e$9B;ob d$(2Im{3 ҄펤ɛE SHa{XBG‹L!qB|RsJ^kJXePzn*w ='"$FS}zl6a[ZZ#$G[q2tݦb_ʅT"럔+߿џkS;?=zL!MeWCf,\YQQ244?N14G5H.~8ɧy0MIT?=r5?sͷf:;;oܸO?'\M!'k2 3A0R~C9f~׿s5.;@_ P7777ң?']]] L~^n @A###CCC?!@ )|Mw1L?x&_Hd~oÇD޽{<`~WC&'I @oD??F<>ɚ|!!T0AM,]DŽ'*$~?x!?x!? ? 
[binary PNG image data omitted]
glance-29.0.0/doc/source/images/glance_layers.png  [binary PNG image data omitted]
glance-29.0.0/doc/source/images/image_status_transition.png  [binary PNG image data omitted]
d.J5hWo!#4 G {^i-{$5A (Fw-م[Lur3;n {/ڠ=|Ghd4 ~x|=xǣPF: ր4u^3p_}8AfP`鎀lX{#(2lb8Py8,faI+?PvFBhՊ6xF` >q-i Vu(Sj1 "Q(d} CLF/Ƽy ((2WgZkM0 ߽CFPh20|0o̳m5-]>gD6r4^tY5h -_L63T{9ph=SUU?.dRv9C-BwDF#Q(dt[vvOlK9LvFFPhf~f( F$=d42 e>V Q(Ue#7"q)> gصrd4 %aK|6ӸޔbRNHHy(X% _T54|I.zSsʝ*Tve”o޽y7/Ò򚆚L29WZF:][ֵ5o0̓Ʀk`ǩ{2%%zZRgI˺/^|b(%ui=/@E7O (xQZZZQQٳϟÂZnz QңƗů en%-ϓyyy]{Pf"M퇵w),,,//ohh42%=}wAy fg=i'NIfQMBBBzzzvv'Ojkk92ս>]{Pf=-{׳-XmW`٧vtWEGG_v֭[>P#QR7Jdȉ=z6BHH|5 T<ͦ7"$%E<FIjռk'2dޓٯseiKlS6}.\d$+pr#S}dy'=kޯ]{P>!k}z(B꣞7$8Cbv~p8 P^Xbægڻ}C+?%#GΓk9h ڃ2鑬d] Z on_ޣg-GBIlib2rgInTFI22U?PɄc Z)lgdJ h>Ap9Tv`݁I[;9ntEFP]{=( e·Bi033X-u#uM'`BW*ǚaZxv7:OAFP]Fr޵%LXW'f/j*N!N1Q fY;@r)`Ɏ+,FhTȽSƻG4$q2*5po CEyUFḨkʐl4Z< AFP]$?IF2'Ñ(dtz|gj錧FFP nH挛a-pgd [~wL@ hd4 %퍮]tzGv|@hF=Lmt L[.3 ]Hyc,~O,ͦ;idg`_z'Zw=?71m70#M>{bb'@5!AM9PIkڂHPolL =JC9: F!e[,*͗( נ!#4 1ס^i-$d}S8BP ȵR1Hmo2ZͰr;TZ@jkefRiNX-Nؘz6RPRC|h2쏯xzB[QXh+fi-e{a5){O9BX6k芻ɰ1j,,E o);cdQ#;:^d4 q:T=\p@%Je&v16Zh?iG6|Q,Zћ|2w皯Xk6$Ѣ:6AF %}?7lAF#Qz2 S= pǸA*j>tq:qm +SgڴhJ5$];-xl6FF10pF>UDo|ʸ?NVm!#4ɭѕta#G3 rڻJȸT@I}Q 4f!uMZ՝(F.k$ Gxu- d42l+Mx=qFѰ1j,Xr&R[Sݡe XcBB9`7Hr*ң`N!uM[ڝΡEQ>I3"(d[֏6&`5P5-ߌǘ.I8J~?BFwizCmcF!;ŋ::J6|w (d42r d4 Fo| nx.-qEȼW^z d4 չK[TZWnۼyRe%EDDa]XXF:QͯO ?@$}}u7r_x166Bu9Ǖ0oɁ"',(pBtttFF2\5px2Γ塡W\!{(T'2:evy+-tdqBu_p=('pQ8D5y:O+¢h\GPܻxYO[8e fZsG]^7KĹ# T>~,Zi;潌DyMavB"y[qҥ7n:PNg7JZ'~y;hP_V9|gsG|;B{]Fr9pFq;G"e/u hΓH;~4 չm*]{P?iNbm $-&L5&|'Z-7̈́W9Z̶ ˬ3'INno= *6ûGF&К]GBˏ}덃Hh~%xV?8s5[ԡ 0xHp`u.(l! I{@<&sࡈFdXijpB.ׁ6IL-7#Qc#L:;lpX{:d0i2T{ _M4B7HCGF#BGA/4$lx?Γd4 Յڃ2Q+xj>|k~H(u;"2{o"F[c =-NfC_sl+}l5FtBj ]odST/d0Yڭ=S#0o04nQ#I|O6rawHLI|62*<(]{P&-æhݭMjc$0mܑCF`$g>U`l4wk<)!<,hJ| [,̗( ^凌\7RN/{$Jk#ʐWQׂ6Ak10 ^,׺C~khAAM` t)oOlpc@&l˜![AID} zB{d evRqh_Gs Hc'0<04 Br˹\Jaf{ԦSY<,hJue?FQ@ yt)G 55&T>!IX;*A{gxBGlYh݉$a G{dST/0JRXqҘp5'ÂFė&kO115k~xR9$h‡Bhe7ݣtYGxO?l*ح*# L2~ .dKی-3)jD c&}?l9M>H qFWZSÂFĔѰ@p h.,+|Fo^Lmp櫂}24F/0dyKn)h2[5ZfwPFEsiic{T 2SFd]9T7[DWp7o)Z^zSX[L5+S= X e7XgجQQעw``h--;Q|Aʋ]Hv'9z]K(, ͡YSÂF^` ﻵ[ihQcŪhf30_¤L zriYvh[HBIxt9{kYh5uIc&|;e^2zաr}>7dT8Y[BRpmvd4 %&8F vc^ڮd|:&&IZۣA]wHߥzD}lƓjaAFPb7S+KG[ajֽz+zR<,hJɯ{~q_m6af/_NMMϯDFPgMЬ:CCP7I__?$pLQ```XXXBBBvvvyyySS25pI_ܬu"ZSG=sLtt۷KKK(TEKQɹ NGO=r>h4;~.Љ^^^'OyfII 0B5V6Ohȼ_^x.6cGCb}ȸ=Oal4d&eʁa1+bN_=r:ǏqƓ'O(TGRϮd> w6ViR9 m'Lq":Tuė K&IŒ[&O739w1_ijyRp SsKw]敗RrϞ={ҥk׮VTT:Pҳ&*/*Z5'r756H:85hJ'؄Xo!]]nӻO߀ۇ'A`b3yP =Gü%h畐K/&I06 Ty*$GP_:# x啛aaa߰P׌Cn⃖ĢWf)Wm>V_nC5?La7\e:fh< {NinYnvKB#ʪjK$.2*U={}A](m3+lW VV348*#_agV|(5Gp8C3M Qq cFu:^&'W Q.ct#n9ڃ}i303āI%74%V PiIsb %>`pz o,H$ Uu͑@} I}x;tDFFP]F2޵edl9"Tr{wl'$lF-d4 U*kz}AF$Bur+Oy!hHy|FHŵGkh(yBFw{6M6I42BFw}(v :$G.I42BFw h?? |I42BF ]i 1Cf i0).d d`ȰVy6{?gy|,X65_kGiC@vCSC?nEN~²}A"(3WoyO #&a kݡЂ/uPTLՎ34L6abkmhh KvCH*Z]`Є8ӛNe)(z ihd4 %Cԃ%0a!w)DaF^-/ ih@2 i# [`lOnkzdb{hJp fAYhh(NOOutUUetII )Fɜ9'ui}+h( 7˂###SRR+++#aU#SSS2%sjpASF4'鹥AAA.]JNNFWTT4553޵24Z}(++p>|82%s{^\{Pf>fI,S4Sڮ<`i63@y[CIpuuԄxcccd4JT-]{P^"?e|@!CUYFe@3Ԅʘ;}di:Os+;7,%%%Zcc#2%s.x}AV,֝z0īӛb A|~+fZ/;sVF{bC`󊠸l\6~'l ͏q@:Ovoutt`ҧO(ҳ&n~ڃ24{$@`OeɨUʟV@*T8xT^2/g) U:;}Cyu# :PܼJ޵eCiWYV1]`~z4"g/&Rt/kH_'48ݽGOz6t\^KjvW펷`.2%5~y}Ay4{j>wΆˡH yO3$1y 6 d6SM mWnHI=?,(T2:?.P=v"k@f7|puRsmH_Aqq6 v*a=~&X 'wсZѣ(SU#^9ڃJ4{DLdG1ܨPy?Qy+or"BeȂרih74%޼y\HHH&Ch̩{e}iD@6(?y@163LU7yg:h?v'ouZF.]{P^ʓE$^iOʐFf!d@=OhFɺYkʰ\yNt~. 
OΓ~ي+BFdNżkJO,w 'KI@F]0:<<P_ Q2')=SK ٷ9..uR;&dθn2kmffPhJ/v ڪÌf6F0F/313m%$iG̳cdZeϳ$?z6*'6-vl;|f6^ V6xYGZ/f߰vڞeQ(c4- iEiI-?dҽ0u AJX=ڨ5 kݡQ:h_G@\7RPR.ȣi@|i{veF(d?fzЏGK!u>AcJ*ť`{DzϰY#Ѱ֣'h=W]1wqF:88hiiddd Qhq@Su TZ/pxR9$h+x we^c&;@0iYKeqGYftii)V[[;**jٲerrrh2Z| MdTA#~Nnј|kC!_h5ۼYBv; 7n>x %KZ#'q(TW9732H:}9[QںH :$v*i}kG6WPsM.dy{Ia$.2*U={}A](a𑣂RJOk VV UK6?Rt zGr+9՝Bh:2r~3; 6pi=t\FэܻkK_ D:T=\p@%X~Ì^}86|`<Ѵq|x"}?7`ųlAF#QPXM=Sh^8˖$0u&>"a |WA=HEzѕ&7O=Sϳ@BAF#Q`\䣶r ѕWʽ)!4݀a#G{S`4-0ӷմt;]729v~idDڸ"H!aFE]n (d{6 0=#MSWDf3CnAʣtYPN^E'` #_|:㌀ѳl1_yc8># 5^E Сld42g ]K!rQxAR Lf>{b#/ꉡ <&7{~423~-a.ҙq?\뎌FFGΰGR(]ӟ~e9Έ/Q{㋜]c;gF!/(o/.J%A.+x8!tN9+%UFEE4[+ZZ.Z]}yի57ּּ j.-ĕ܌W ###_^XX(snni}PznC&R9*n :]sX`tAA1YSkqmkX.<᝻g0k?K1tS3f͂ .t`tUUl1Ip<hIqY=APѣGO>LjZoz4r8o5o䗝={6222%%%//R]{ceNZbLZֽ;wܿ0][[py)|C O-=sLDDDrr20I&y2X@y -1g-9Vjj۷`Kiqft oo[Oev:icRĘׯ_///oll|83ZBo;J态2;SrCCChx(sKxñ2)ZbLZLbZLL ,srr>}Z__,{!mmdWmuM7!W)3<9",, &yFF,~f[X;P^ӳv%>5::fgg?kmȗvUNL-f[ٝK܌-\=I@@7l M,[Ywxs {BdyE6]5xhn)ɹ߰ݸqC2-1gM-eMwk@'V;+TILfDJj:qm壏5 s"*vM7b*j gڻ}C+CirejUce#Ę5D1Yͷq,̌Df^rWȦ ޣgLv` @6rOh dش,;ni'd6p*yćh1=kJkK|ʀG 3P$ެYrdm8De操( R:ɦuȦfIܻ3$^NfUgtQ57ؑwc7Ðӳ%Q~.Fx@ffptfױCU=!k`jdSh$Fœxo,iޓmFTq+wb Ȟӌ,V``֩;sˁyd ġC e7Ä#ԁ6G -g2k}7z|ъ =#x_ij^ ٣\\9̊o$>7^0t $OCo5ڭ:]uIX8+((|]9Вk:OniF'=nvWzwѓn?ſW_B`S~Zo6fvj>hNJ5}Ilϔv|@n V!f X$Chgњ?nRp|ut|CK< ȨiFwHjMb&P$g T~ CUl_?c0 J|@A*jҽ2 48L1u3#gȕTκ|@TUUotI_,)EFwf~))yI<,u8򢢢;waBHHӧOiq_\pի)))L~I~,Dthyy\Hca5+'+:A$kwsz5[œBlBɑsqzj~JOBtddieGs uTJ\wsFd;s;ƛ5i?y$$MFl~ )o*q#`i=ҡCRZ>+OI b4YsYy£gmpj>|72E;ЩpW$(MF\@~Ljɪm>,,|0AІ_vѺ)q?9}MFl.(uT1疭Z1yo7IA}mO4MF\W*{'H_?!F5ecs[?QOG&#k&Kʗ_T=l#4~rMFLFsDsMFn4+Xz[HKOcih2g#΃Xu΋Fձ:v]+.\1 M&##F?܎V6-gR4'FtAX2vʢ x}%27ci?'{h~R+hrЉ(V߳@*vf_O^fV?ahb41Z~s8;`4,dtrbɮs퉨F˗ց {GЋ]Gagh0w-H៌ѮZ'}C y dda4R {K;Xz~Cmޅ*xM5g:]y0 b4=g/wNtT7 (xbw_偾jG;sc6ilWb41LLM|9@YQ޾h6FK;4n 1MF~y wts6!-9-M|{y;_ٯŨl2_j`lZb41-#& 2ak&eApd:a&b5::6nf5 Xh£lb41LLf Nek ]i)hVxEt10ddbet~ح]|`l;1F2b41Z\"ԛZdYרqOdگٗ~g4n1D'F=U%)#GDҤ 'Jϝ;wʕj_j5y5H&Êʪ*Fmb4c,~MjvqJFԤ,Yii)"r&FfəVVVϑ5YZZRk Aj`Zib4Ӵ<̟ HIip3E:&555==ݻEEEtUU1Ln+mBn DYttt\\\ZZZNNNIIIEEhi۸5Ϙ"SKbT6Qh2)Z,44422ƍݻoɞx4yɮ9-r.$~cع#nsZfG0wTTA6|G_sAnv`իyh22vb`U=<\B=nUϿZހFP=Pa ewPW+|\P_|9999//h22Zqm'nAU=7wZ满ɥNK\>Z;r6e6?|׭^,S_ 55b a|lo5vDgL0zO /eZrAnF={6** %Ff1&5#0z.|mYz;wc]^ẇ~4aa"/0PhSWN Þ#8-JY/*iަ8]k[jHf4wXo#Fc`7k)߰d"}PwWzlRFz@8_e=54h{ ϗjiK\j1v5ѻwyoh hx|#[mBRYew86mCm>9mށZ/Qei,6?Uw4Fu.ZL>{)ke!ݻj|FH ;E?+^YzxWm-LqВ *t3b4Y=,:E1y U=zwdM_~4&2ۢEˮ :t:|$dZ+sl?QÁ,e+_9x.R(Զ]FVl:?q/`вJ)s. V֥o7zDX z 5@._:j=Kv FR^V6j7cVoei4޾C->]rc,D1VYK#|`߸y$op7[~b_-B7#F"р!l \ĮQ`+v;,\ۏi!s۩_.e ^?zn|XQ_NoEV~ R A7O!_1c6?d߫7?/d--c',m8 [J&`$ ]n@b\PaMl5bc z{ȕ[}TQ?#R><|7 'd'lu\ |@H Eu6{оcNtUz@\<oXYGgo<~ |͟225"UdxBGɈѢgt M;AZ@ދ]@fwZsXC |ȩ럲n94qPlܵhs?JZ'}=լW.[@;@.F|}c=^2M ̬&1݂M&#F,};/+v# ڴ Qb4dػZ:@,8x2 B,ߣ7? 7틝8]^m dkHpdh3ڞoѲKL,bRdJf-Z9zʢٛ@3W`m4~+_9]? 
V"4 YHmaE!11zV?"_ܽpWм`/hnYl1BU7,G- ѻ{yZ޽#F=:> %&!bE&[v9T6&b {$^ُfe44~)_X]XQ9v!dGXjEb41l2-v{ϫ}@6/tr/2 uDk[U%ڴk9=z)áYk!RF*A|9']+,S/7jEb41 =+eߵPڰvf2Azhb41U"606UZmhBueۅwNh22f럲Ykٯpr;䀆د/rȈ$b41L'D M#F5⊭##kbVsd1v53=RS n `T* |2 2Z.yPQSP{erXB^UULȚV0>Sf~e̝+J#SÓ dHFWxדon&e**(+2o,~(/|:s1),ɾOpղ>|X^^^YYYUU<.[JC}AHEEEգ橓~.)SMt>>55533F4ԡLc2:~M3FI4W-ڍ+W M(w\NCUrfe%[gm]hįZ .DGGrss(פ1BCrԂ7ndddY򛅊3ZI4Wȳg"Ld߯l:*~[yfĨqϜyG<=>gfַ];f9vS̑ȤӧO#A(}֭‡wi+$ш_3/9s&<<<>>>++ qMhr$ wqعW+^eP^&jk}Cȷ}':u իW {ř`AU;Ҭ.*-1&Yˇz^{玅RGd+Ya/hki5v⧎N-*9֎%_>jkD)c&N  ;y;q[m̑0ٿoߓ'+thįZ`2^n9̠a|9۽g/[뽐 eT;nt[u2x!tan2c-G ]^~d9'UӁ_F;s$4ǍU(O|t&^MU;ռ.,-4TG3b$ۙէK &?GN]VQ-#3q;tBZZ| -[顊j&rrtQ!|j̑x13f\8l$F:׼~[PaKYy+'uꢿ?dyye"*ДY 47TR59-Z̀ ]v#;G-cpr#7yBL N#FIh얕5{<-8Dzw Cz_Qo`b9;dh:uP T+(4| 0z88iK ֺiM^R})GHP/8lϤ@39[Xټcl?ho!hd%G/e -%g.pA,ҿp 6 z'D9]fĉ1/< ;efb?gN%V"Ssϭ}{2\(Q/aΨID/u_<~[Ltf-_PrM՜gqG=}]̌Nʓ=:lO%@39Kfҡ<#?op^{jQ./S8̅'cK+saUS;88hP@ WM#WYwb7L0+uzw8VXO|(>~6><`ye߉aH::23:2Cz[q&l21|G 멋ݕ;SlK>f+2qCx#{"kb(>~ՎJ֭[gaa1c4{eaH:+^'F?5Fo:|zcW{۾i˙ 4HBZ>cPѮ^cצBlx+=효ߏx񢃃C֭њv|̴g 85#ghb wvaMwZhf,`m;eяJF|<!)qtd'7۪9Ƴ$<~m T詋BW#AfWyZ(9P2T]?gZ}2 z3_'}ChNM8~ T 51M"FR {Kw` RRքQԂo=ÅC4ڢRl$gZ$:ye"kG\v˶#4F¸+`{᧦ij?ZZXShb4^xIЉjף& | Tً Z;ke=z" Gx+0hUh%oѲKLU #.@}-"1lٮ% ~jjǩѪ5uM&F."M!٪A"$ בf125 ZZcCWG/%F_mthTHZFc.7+S4NH-V-kb41DY:7%BF[s|{,rөlN1lASkb@'"nq.ݝ܎51%>xZGFC W6i^j*51M"Fצ[Nx!d4@˰w0,0w-;d_^vxr=p$"MTMq=| KPAJF|G<ۖ fn^㌕{hq.S4NDifVj4uM&Fяq@q9ޞW|5|*CKkϖ3y-kWRSSE vM&Fэ"F{ѷݲ=zFma]$bѲ_hWy.rp~_ڀ M&FIgQwr񹊓!Eܧbr戸]RTsRFw]xBM@Oz9]U-L!!fo!"ftuu;St?D={6"""!!!;;Mhxʳ0$A YKCCC]vB1޽{qqq0q J>}˗rrrJKKdsaHRdZhL'vׯt'F)W\\)Ig Y2`.]E_VVn2q:111c  ]2&ZXXؕ+W:1FȈdddddh22222b41MFFFF&#####FϕX]1IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/images/image_status_transition.png0000664000175000017500000075143300000000000024052 0ustar00zuulzuul00000000000000PNG  IHDR9]OsRGB@IDATxxTUz$PЛ ]+t]׾UwWO]Uۺv"U!BI = sS ;3<νwyy9$,(@ PC%gh[A P(@ P~(@ P(@`cW(@ P\I@ PlF,@vI*jWqe V֢ uoDL#uwqu+}<_Dy!>]|٭S@g 0,y~.(@(C*[2UÜU \TWWggTբNZ P,AQC{5]}-C/&};*Hzl2(`N̮`Oͥ(prVb\ޗ{T D%x1IaCI`~F5lh2YE8P\?TI`Ԩ IȤH1ݣ#A,(`W/1ȱP\hs s0?-GZAqClyh}c2K>@ TеG U#1O,^^0 P9ʛRp 5OfYF.>߸l; %¦H059V "<%0Zɗ^E;i).% ="Veև' }N< 쓹4ڍA~Yv ŌIvP"dٙ B$PlTI8ߨy8 MMj6wG%3-3p/6ebΖLV!%> w$սϴNXAB P(odDඑ=p&sk[]2OF%Pt<єTTmNJֵHOR5G%'/Eq#zoMl΂$*^'kk}x(@ tNS!}鱙$Y2ҾqstOLh.NVRCڗ'%X7Qze%0 ?|'/ozzE^p'9Y2$IRJ$t}=jjU**PW^[_VʲrM]MȼjO1x}N`l {tR?LmCOU(p rNCZgy:f-؆J t~5<<Kjuʪ|# K /,^!I/TQ]RbT&?eZ]&F`zXmЅ!'WRmy~vmp<}Y\s&(@ tNaR0Nϻ[Q^SinUuhl|&ZK=z+&11 zeں|F#(FCBI]'IܢEqqQ'Q`s~i/}j69%glo<6e<۬֜br>\UDE/1 pn:#zhp,3YZ/OBX S:YU:'ۄŒxa< #u( r SudlVYPCeq'IOϧşҐSߠ@艠n XFea!A}|<"=X[9,*CS mA P- y PFPCf-܎g܂޲3F`Loo$|ܨueګ|#[?{±&IY}TzwJv,C#~ZMki"*9Sҳ7g^LN` ϚuRA]>V6@;6,m]MO-\,ͬEk4!HZ&v~͞VT8^]S`U~ 9H0zv*Q@ 08[ޙqZ0n_at(%=j]E,OUM}%r?7W=͛Pkb|.VpuqnbB PMAyQ0@J|o)eW&EkC~ؑdIvq%$_֭aDUq16mуٸl@f_; hR\Y?\eäxpEY(@ Pt81?> ^7Lp;hg#DD  ؁:kQ+iy]wi^^}ŝ),(С r:7(` dA?ݤceixۍpA1t,m  =Mzv6Io/޿e F"KZQ\"TȚCs9ͯ>(@ 3lDF2r|% z9NnQݥ kD  ¹^&P[^CVB<:i{uJ%3/Âɾ6q(@ Pct+J Pj^-D$|-=Z(wsǫqr1ZcZed wG@]DOz:O_{;(pNFބ21Ԅ^~p{>YVtɈ7..lmU 9˖𵛆tkрwWgnɾv$P8+9gNj)@ T`W1L=QޘwD'ˤfv5hKlZj /?>/\3 n͆-uXz׾s"<\[&-ֳ:]AN?V9v^=d<ԯir_ވ8 -9UrەD+04>; :u2j+"͒AN(@ WAN{x(@[Pθ~hGWOzn.gW}5[hձ(G+=0Ţ栴wT?0^n2hkϘ ' LX! 
Pg&M!jWp;n|g){Ai_W\B?71 &Ki%Eklҽ P8}9o+(@ N@%PspT6/5ǿـǾހؑ#3l O0yX]iXo-d* (@ 3u "D,jǓ Lਟ"ާAjxp &+:^tjs<~Jw(@ P䜞Ϧ(`(ZD,j͓ < kw"aqrvF?!sS֬Ȥ|zxgn L[m P8 9S)@ MVI"T4OgNďA FI v{LBlw~q}pL뷸M P`N(F Ph-؆d'XS܂;A pԜ8>).֜b]u*qDw(@ P rNm3(@ N`Mà.*MlԒ pUNkNJ]_ޜ0Y0t1U(@ \A}.(@ 7.NO-חIt2q9:&eQвzKE]ŝ^]n~ooB P`sJ"@ P8*" 2FW%"mtqǸ.H<Yj0CW`Fͳ -.M3n#X3 P`cP8d-|j9U˲b4zz\q\"cҋ$?lUzDbd (I  P8S Ptnq OVcW~&Nb{>~3~L5ibP\(n|'64(@ `҄G(@ N w9Fw& ĎO{ܱ-޽*$spVY*"I >m<==A P9 nP0Zr_aޚ9}RWஏW!<v ČzID09!%56[˚SIAyP8SR<+zSvF%!ݥpEСgz{^g0n;~̿(LAN3R0X^1j^tя;X|ڶ@<[wcuG+u I{s7ѝ P. l>(`\l9kq]JFuUΏV8B1b [`]*Ce3@ P0WA^_9%6wgQz6Gajq>sR-0GZ vh" ^= P zQ0@u}#!YaIq:ޝ\$E{w,'qîRRPQoҒOX7=gٛcm P~(@ P?Q^SǦ e.D\yR0{I. =u@m(ur{`(@ WAN{x(@$PU׀Yҋs߸>|jym=q+ ܰ[^'۬kú#1ԟsst*ܡ(0᷀_<:fj͔&D;p᳍+*S pX87(@ 8GW/@^cu(!}A _bq<$ɹtqH7Pq9&P0 O!Mj,)I pޚ'u9Oƛ?B P(@C , CpK$ڋ2T-HeYs-ss^>rP>Z0)@ 8{rѳaŞ#PoQ\QdX8 NdV|'EsDjRph9x PHTɔX]f^Y[w;)1faX(@ 8G!M&| :Pe`M)B UcpvuE@x}nH K^QAx9nP+ q=[N H`YF.˪=.joP |##szR,uQߟ/7gAc( rq>߸[؉ M}졟cZ&% pF?d:ꨠ# 0q϶SP18IWi٨AݺsJ ;z5 $ Scܠ( rTF5=MeDz.mD@T$ܽpڅP]Wi9:=+A3GXC 8{l.(`<9ڄ?K]A*,hMM߀(|1SڐM uǹC P8f[)@C G6z\TPxC֙2|?~ :@r 1FY PGAqPhU h%sK08* PVx,Z7!d( rɳ!T Fwק~Gb;0+a(πc~fBhju /+C P| 09_ PޗAq@\K*p@@ `c&ߓxGHsXwȤH446a}V8w(@ 8Gyl'(`H~ZU pܦ@QH)JVa.сވDB P8Sg)@CV|Dz>Blջ;;@cS6ڨ՚fsu.&(@`c׏,z&I骹bo><;C+w__xK }.{5)@ 8yl((`4C%@lj2bHO +-2dͺ F~Y eY P& ў8K F`gW1mWp8w(p2Ol)ѝbn` 0q'RHq?D͕R <̇ 2Y'B 0q'R,,CR>*8N] we]-{B P_QG{l/(`FU#AZrMܵǺ@q!nSp9H Ph凧ʬrWhug}' /_AN|ؓc'Q- y Pd`}@%_y-<$8>PQ/w<@ Pe(@ IZ_W owK%L@IE5ܽ-ǸA 8&Jwz'Ԣ*59 (@G`HOm #O]}J*z&&zNpM~~Mwx} 0ȱVQ(&CW?DՏU 9ǿK*f(H riaJ@<9CՃAa UGؘC^AN[ n.}gWqIܡ@ή?2wqK P^e(@C 56Ęc85;n膰rprvAd]<~ -M Pe(@ ʿ/NzwTqv7l#X1C r9rLwPfe(@ tKځ5(@e!PfLC/i~oenΥs{Qh+|y闹9O&cfI+jfn0q'vRPs%?@3OW"BPD8Rw9> PkMo#7޿|盿c^ҕ_ P* ^,E Z V=8hCMSdeYUB7f X+ PNAι(@ [@55_d~PtTp,)<] # 0qͶRP6uhJ'C\e!P9yL6UjD郙"-Tim-,(@`sDy$kW뮎 A]Ew( N+F>Nܡ(` rCZej懺21P=50(c!ǿkcK P8Sf)@ t i~Zn4'ǚhC]]4*N%nmx (@`c7 lIYu E˞ԗRSXWԖ$jÞw(@`@M%-tʲr4q wN*P]R7W9䨜{ J=QD=}6<0ȱGɆP&Qć֚vJE^YX.j^n.ևM P!8cf#)@# +g"Qg*N..F^ ; P" Q4I NU IfdR*KP__(p ` !P_/ݰ4t`9#%`f( raԿޗϰK#GtǹC*r1Jcqeu-zwZ(@ أ{|l(`3#E`/?k醡dL;X0IMšØ}Gj:P$ Ǒ6J N`L(`CMÇ X(Ц@E~>0Y0K)M(@;`cMlGGi9JO*Tsna'mh²\LiX_m P. ޟ0G ^`4'GM7 cCcCKYv`VY[b( rɳa⊚YnrB*Oeᛑ{,k0eY(@ 8G}l7(`^7_l4ǫzd@s#6)q'Qs6g⚁O>(@`P*p]J"l YS 9&xV$Ut^2jڑ*a(p rTM8]'WW{Ш;(F,Q=toܯ),YA#?}0@ć5:jhQ& й8NIn? Eu}#> gcܠ( rɳKSdᘬTo.~b@;]%1q8@Az<]q(}0Tc (І6`x@gE'J/ڮx5/ۼIw;#PWbľp4\d><<NX(@`㐏,~XEj<%Y*.Kzq//MCfeV5 w(@`Џ(0gNoUOB6m WWX<.=, .a M(`@g.Rb}Vv/^3Gf!qط@2g˺Z >n>ևM P8WH<~]pـ] Su6#u:UEE(ޓ ow\$cS莟=(@ غ[?(`b0<.2L[,-5k08!7HzqP_O=V(9ܦ( rɳ. jX^P?iI ÷mn8J/37Džv#Y{ P( qyk PB@ezshٜn3"9˖2ۚNvwK"wze@kC2LM}qqP^Aރ{ )0W4n~x%+k-uT־}V YNOAyl P %qxa]TLBi~\.|:kG;x`Aeͳ.|FK4ݽg)@ P r (@ URp$-]5@"M9;j pܱU/o?2 M׽ Ph[L PxWa\.|l;T^X4FɺVw"#Dtʢ>a>X--J3-x oN Pf(@P׾s"R$~D:Ep/?}C!\H0,%-{y::tpfdT(` rlѱΘ+ " Ɔ`W (mӝ̝s&};/#/uIԇ?]YWFYI&2#v Pq닰.3I6:3 $QA[>w΍@c}d[cYC[,>EQCTsλP{@ P0%X>b\/NJ[O١tGQpqcbYTE$zpoWEDD,j*ɀ!j:P8[9g+)@ UɄ~ ڍ׶S,&z{PfL ґ~=Ņ⫻.FLjOҌ\- xC P\09(@# Z%QI ˑ*|9ɏ9`9eڨ=uY3r/iIosP[S49N[Ai P6(LH2|*K 5|G 3z,|٫CjcG܉  6ZYݹZN7\ P(p rސw(`;ȂQU׀/ pG+BzuzATJ \=mP˪"YrTԥꢫ h;F\3(";?鐸C P 09(@c 2gAz!U݃GZz"H N͆^[jn܈ N[7B*=hjͬ }[(psRސT«Keh6!-jRM?^?$g̮4!?5i;dm'W CK00D Py`s^! ({7Kbd4IZ a{l>!ALH6P[4K)>>w+ +jkɆ}mdOg=2[sެޟ{WG끐øC P! 
#TyO P( xbF4Ispsi}͜" r7?"I7oo#LiGG޽ƸѸgLO\50z7ƩO~I If `c(@N`өp PP Hr@--úC_%2X.ޑn=zDE/> 0`+cSTde !eh7fTc̒!~؉P_O<'=77fvRPA>t6@TY vT:P<>uC޽uӚFOˑ$ʚ:;:Q򊀻oLnYd2P]\|TaI\;03RzOQ=7/K֍2wbmބoR@G 0haޟ jgGe SOcz9g)Y&H)apM #4!</O{ʆV[V*AM KJPw^&_/JXLW0SVYzaqf$)]GK0pʋy(@ o9[G PV҆N76Act/=b|lgKN \Y}~p<.pU/-{ Y5(X_yբM)+zhT<\ѻK0#E֩>==ٯ1gK&ޖ9G+b0shwxH5 P0>VAkѺx{nlll^~UkHdCVQ: kʐ%!pu򪗗J&dusޒ,~WdBK:ć!Izh0ir{wo܏OU)ms=1wL(p7j~(@;P)ߕ`͙ZPzKTϴmc4V l~sjM`̔=huf}(@ P rN7)@ P]j2ɰjs^&ֆE#qa,ݝam*@j7O(@C 01ca(@ ذ "6em$P 2'N;d~1ZoIWTeQAvYsÁBdeur]0E% Cxs P) 1;[M P TJYp ceUZ$͡Qsi&skBd.\dkeuxVd2ojy=- krLA]C1[ֳ|'O(@`s@@-zi%Ȕ+$()CS*,rh.ХGo8;kJ:^*hR fTF5/ X(@ P!8cf#)@ ؐJOm9VUQ=7~o*QK@iOjh`U)@ P^r=x(@ PcdHZa~]s(B  P(@ Pc)@ P(@ rZ(@ P(` rl(@ P@ 9-Hx(@ Pe9Xw P(@ P$<@ P(@ ز[~z;(@ P(BAN (@ PlYA-?=֝(@ Ph!  P(@ P, ǖN P(@ `ӂ(@ P(@[`cOu(@ PZ0iA(@ P- 0ȱǺS(@ P-  P(@ Pc)@ P(@ rZ(@ P(` rl(@ P@ 9-Hx(@ Pe9Xw P(@ P$<@ P(@ ز[~z;(@ P(BAN (@ t/^{?@ P8Cg)@ P{=|ᇝ_ր 0ȱÇ&Q@ dXlYUw( r(pz*(ӟtzqx)@ Pl\b^K P |@߾}1ydXfաW^.C _d T/JPPf̘ujcϞ=XnRSS1rH\y*7|]t-מ꾖(((?n֮]cbZZƢ Wڵk1f 6rh')Kd2i)))-?)@ F@(`x/$4I `6miz 7`@Ŵ~Ӂy5V# 0K,J0rHSTQH/EW), b}?=z@IIe_z8ʨsNH  4, 5\ͺ盷=<<̛HJJՑmX\-ǤgJ&=MNUÇCzpp!5#FPCi;,7(@ 01`U(@ P*1.wv>SG8r1Bј7ƢE2j^ 8:i}u78͝ւ!݁XtzQQQZ?kkzTs>a,7(@ fW; P8wǽJp:ر㤗=䓐a`5kj'AN{oyY?Uչ*Ɂ> |úܦ( r:Z(@s.'Y\\oFwzG KHH믿jG!;;j pfΜi2Y,466Z羖;h=V}Zo6%&&j2B;:TA>x6 \xZZh~d0K5Ia\EEE& Փ1~x9, 2)]v榨 T/rJ8z|ϙlnڐSWW ʦ}$GЎʄ]~uQsf TU*IFMB}i;/(@C ( B PhS]M~&Nh^R>cȺ9[nE˚nz饗L_{OViUe3IόYF;9sh$02Io:Z]̪ډow@IDATʜUiU=?h鯟ym_$驧Ҏtӯaӎ:@]OUoj[ѮSi~lnvmuM P`TX(@ P/quYzdy5,88ꭆ5|M j^TOo.gzQsXQuߓ[A%P>Sz~iHmdtv;:ʇJ%fWsR8xyyO>m^>^{Y 'SCKEA P,NY܄R(`\ȑ#ZIF^fVu6㶂5(~&hϤ(@ ؤy}zP7}t\uU6&Vؓs2G Pd{ZKT5Syl(@iA(@`cϖ-N09a- P(@ PCd(@ P(@ rNXp(@ P9v P(@ Pܢ(@ P@Avlj;Ty~,(@ BA!+A P %I=Td@qʐ]R1dM7O#cs\$!A'C%H Z\(` rQ#PY׀YX/k25eUZ-_ HC\P Q6NMx^sh=;uS%.WWZ-SL嘷# JsA]C1[$F&Ejf(@;pLv.6:II`!`Az C⤇edRŅ_LdDHs:De_akzX IU&E^ߣ $bjXt m}y"(@ Z%9~>(@hhbYF.>ߴszUb|1U @oC4F>;`#X~Hwem i Ƶ1#%=# QWVHA" P6gUL-Q4I0-+e-چ&-nȂv)zf[@a(@`cS(`rh^-V rzEf ^0Ӫau}#2ish<5&WuT9jMY^.pwq6w'\DH0xI: CT]Bq:nT/|~|*/sYx1'&az<@ 04z~0(@8| ,MÛ?Jv16Ϧ=MQ͆*A>ɫdN"n\ήprv5$ 0竿hy: 5hFcejJQWwٰO5|2[|vFy:$|I>0?0R%ilj/ۘ%O?c%KEU"/y8tX %@RBOZ55_et)p^s,(`9y(@k=7jhZtt6ũA^2%;k2փޒ(?6i&iGiN6*2QYZ2~36Gʼn=ڊZ֬I'N(@ tgS0txbFmMO2ޭadECx]5Hꎠɼ[+EE8?JEMU5K{•ƣO<&/ް4c8K9 PT9(`5W@wloU oVk 5{t뎰=5^`cUOiv6J2vXvHƶG'zף98[/b<#~r$ (@ tNaR0Z#o?ns2j`Ps6׷ٕw O݄f}ڣ 9ը-+GA x2dލ3~/ /=[`~/~'JHڍpi߮N!(@ RA)x(@;P77dm]>KnuM]6K2}LhB`BژcZ*[[AzSS`dk-kLl쉗gh.( P/ k@ P DJR?~$GG|Hˌg*3{I<&| %%kۢi N:һn;!rߌY΋(`cueX 靟d vaUGZz" w/i=Av^Q_]܍Q,Kn1-r,s*2:\AN(@ D`\"T7˻&`DbDi+GyX>JIG Z\$#[5(/('m7_3Gsv/7{Ԣ律WC_Ż36(@ 3@%&O=]1% Sŝ{ӎZw(IJ 8@aEUΓ ĝoн P8=9ų)@ N6᳍ݓZ,B .' p7";& Zy,(pz rNϋgS0@^Y5yc1$괿LKك1ΎIYU-j.*{'# p0R@;Q0ʤv(t~-Ь*VRUI,@d'1ަ?i }Bںh=:s6g%;6(@ BA)6(@ <9w#6Khh D&DZyՍH,Y\Ob6>'T5ϻ0'}0sq.ǯC Ad(@ 0iϢ(`L#E#sa"KePt2~]D*Iܡ(@w(@ W@- y{pY81o؇W!vXx݉1@dL2бJK=pnd(⃟(@ - mC PPn I{疱z;&FD~NJԽ:"Թz7%" ֞{wٶ(֖@'0t~$(@X0>\z8!>$FܰM7WtE[7,³qy88dkuq rܣ(`8*Y'宏VAP?t˟% RĎTy֬- ]?l]\,}x ܡ(@(@ Ny[pTczH]b/JE#{;-ѸQ`44Z +vbdc(@£ !|I ?}Y "OdLS~-pbbڣ!J;';z$ zt|>_k}(`% &𨤅N =c{[YTQuǹc?>>ҳnC%,eӍuǹC P@ Pjo|ջ?_֞:a{/<w~Jw cC=EBN(@`M-ķ1w &˺u#OanۡellXTQ7VÖINAj P" XҌ\|"g65k; E̻zZf5sS90l7ϿYA#?}0mĥ04!\WGՋԵp@wZ(+Ktǿs 0qoO N@-@!:}nj~NjN1" yy!$9X ҿ^\ MRhvB4 PKũ3Z>$[  D'k4i)ŭ[dm>m P- ǡ?O M`,L#ؕ{Rtǹ8@_kSQ`isnxqI*T0B P~ (@ HesѻK&j5kQ*;PTo;v/yHw;UA>y0@ym=>Ÿ*u,<ʿ8;%ڿ@DMƣGDZz P8g)@C |aL9^jD8"#uǹ}qh%ޒctO|' ktǹC P8Sg)@C !HJD~ʪܫ7[]c^k& pl 0׀2Kѿq6].nnJLwQ_A! 
BWAz" 寢 Jt4] !@HB =z۽{7ym#YtFpv~uKGeM o0"J=w`Ԅש`jVQZlmog6Q4զPrw^mĽgGFP܍r0Bɫly54۠"r%ٲ5V5)S@)RO,#2sF@\R:.iG.'UE@ti zAC큒Su>J`Dv4# gokj\ژ=m1r+i"-`(ÆJRieF)Xɱ2r>V*jv2!$w&P㐩MB ^ e ,kXFUXɱՑ~3*OWQ*~F*&y\R^ZP'tYh=\`kAkI#h $i)\Jz\8FǨ`RA=/C)F!x`@f$# ;)>10 5kbBjjtwr$GGG xh@l_qF`Vr` #=n<[]Oiɉ$G|n}4J4+9 0+94WFP 1_7ㄟ£ɑ-9'-5$o¢\03=%0M!JM 7w`Ԃè8H&q Dy(; H|OGvUMw);'g  x桤'O(qnfF`%ꇘ;0jD $*05샰cJlݺN!Ρ۳S[9sQ_>Ox<F+?&4W0`%GuC b[@N/'ºd%gp*Z{./>g/nA9I(g .0#zXQqFO % v#Ε+16_o?d$e3#`hs&;0֌@|r3b3oTH?GQ{ٻG~ubyG !w)l_KL|ېsy|=rZȣewr,XL߻w(V:Z1J g#.+ywNIёpbYw!lG_6"sG.yC^F#"ONB-y Z\-;9<{ `Frw`lL:UIlFr#>'tw m( >tkX>{DsB()r#Eܝ3I%+oޔݛvuso_v.n`3s|R=I#(1<7$=+P}?}BB֗1*9B9NLaV-c05#`rFP ’L@f?֣YQɵv3rՔrKKJw~C5ƤT²RG  F'}dL-@8T<%Fʒ|My+&;W]{ZFH (љDLCF~Xɱ12 @DX%ʪiHɉyW\J~Vß\Wbd#cvޖШ%G',ɔp@R/dF `% 0GI& 35i J:dxJE)R~-[]( "W?}GL2*\ u"1*pBX0踖<ʛr){v ߌ#`+c+#dU!jjO* S'=(ެ { ֳyDG:RT9*AaWP@I5sK'EHI2 n|-b.RUtq,A[I"a%DRms&<ߢ+Z f9%E=TվC&rAߍ'qڡ;V}Q I#ƉdH 7`#JN1FH@hh()V[Jr֔OʝǙՎδ/",23^\;'t @nG'*6a)Ag@zUΓЏ,j0Ti@_K꒝jP?e9 E]RO(_Thyܫ*w&:ܝ. /Y<:-.MYNK}Ǩ!QO%oWF#0V@.A~qF0+!!!tuqDFFKŋ7܈w(qI7 ?OcQS֭ȣT)Zـ%ʁcBsm\>s|`ԅ'Rk| k1LjX&!9xPgnyTP.ѷl ^i߹ZzZϵnP_SAcN#0G`&e4>`lD1y&Ke ᶢ؉@"E5k귡+VI9q6MI2y~~~E'RU)(3)FAϭ@~-~'S ~9.*Jx4j(R5`+G+`#` \ʮ\"?/_o t5J f͚鷱b*<{E_?ж&tr>{Dķ<{%j Tcnz#'&1AI6q~o3#`cͣ}clPbB'X /_<+WZn߆"dJen|&9SCc$@s_ެ۹˃GPZniJ:d] %熸LF`+93n1#`ܹs.\ ?ϟo 7o^*[Tdw.@qwwWv 09}. {PTIiZ0B`d%~ig( tӪ1#`-c-#`T.]$E7 Pf`[. :T̘#_-Pa8¢E6 L*BȳL4ۡrbAZyV(K2Ҙ*TQ}`-!JFh*2\xQ888HײիӻKV,XP#3]3 K]|WrP{=]kj⣢4.kOW5yI*tOS|c 9c #0ZB-P!9qݻweK}}}&qI3(ojDS%7.Ks$&Qn@Խ`X,$b&MD~r?' +h֓K˖-ߌ#h Vr45\XF ABmFZjXg䰤@clyLA9Ln ]yݥj~z$xber86>PjSWviӦOfF@aF3G 42(6 *0ڵkӄ 7H㄃Y-8tbs PGJJNVPs߾ED&j/i2mFAl\3R0#ZXQpCѣGѣGȑ#[Qh_ '|Wh\]]sqүJ+WNGp7V͕oe[︄DI=CҚES/b%JxQ10`%GC dL&gϞ5Rhz wũ~u=Q ,I)Jwی#6r[a!w($Ԅ70qAM(2v֮g%G>3gccl ڵkyj=oҏJzݎҝ:# +.]JjC n;Mo?M&G9M~DW_ݻwyb %22R)J1sb6md~+\fFBd%BeS Kǎ+5"׀QFK/oXl`a1/Aree%AAםAAU `5=* pyzx=>HOZcu~^4wP isΥwyG϶ER.7@ٹs 9[L5jSFQi-?2[A}T,ߟaÆI8PR2#/_֟ w8,Q۶m.n^^^2#]frLNv!3",4ٳG _pA^bŊR5jF0K"+ϟ/W}||hРAMmFPn SNǧ>uK&q;В3XI5#<"{NkjQ>@A ,00o05n߾]~-Z$'+\!#d vW\\0/aaaJ >}EJӸqc^50Y;͛7/_NUС\ԩS@{h׬YCڵSvNE>XJ7H_t?~2!՚'r-XP7l¢78&D& _R@ Jׯx4o^Oō`#j&c2& BSիW-Z2sLj>"V`=( 6v< ܦML2õң'UA%=<yn/[J_vAD"K߮v1oeܶm6ںu| EsiU5dF@ՑvkOXjS|+W V7af DpoFׯ'$IӧTnsE/\ _(GڅZOW7U\t [Uן]8gݸ{n_)0 ] ⺔cj5b @"ؚ4i"-`, JN@SL!3᪁$|(WRKs; _~WJAXmzfԁTUhZN87OOzB~-\%PQAb U+hpF.iaɄ?hZ469e jb  2``w`!J ԪPls玌cb2x 64u!*n(&+V57xT E* f)|)9rv.@v/1U| 7DV난_ 0ƒr q/m` J ݌@ّ#GJ ^ @<(58)3 i|,)B$ ۣG&(8Z1)s jS(b</9nRڵr #z@+ 4ϰh jPv7f[SO0MbhRĻ) |A03X[ nݒ%arYlΜ- qR={ ?@nEtyjzlZs=PW|55,C 6g+Xt \aUTx:w,-降չ$#VrT:0, -_Pjw J!>`BsrrRAK i!%g͚EԱcGkZ5~ MxG>npYNꖡ蛜 /ʋnTBX?qtq zYySv=I\L*5~ CR 2@H`@gojSF@A&d_v\ `@S|`A1c ? [o%<<]e:iQAQ φ V O.]8'VrL'צێ0&ܸq/.W4oޜд0mD 2 ƶN:4vXիL.hPTSTb5#6Jn{Rnܚ76D@ׯCІ'"/N?Gqr\QP\cIرC*<ܿW٭[7ڵd8Ա㽌F`%G#&#_%?]ݻw=zIq6/[- ǘ@A. kgn+?mƿBM<FQE^YPJ  +k:iؗ:CuZn6xa!DPrp4HRDX>|֭['?׮]V`Xx ё S"{U#J-αk.V-YZ^66 ! ) aߖy3٣[vf cw#\g>GcWBYց]Jr];aTs2*kF/g{@x ހ҃m(D,)8w^A3q@XfMFP=~B ..KVaL 6mހ! Dqw3Du6A.?ФW/8w\x5JX855{{{Iʱ{nb0=@a0^Z>_ ,`w[w0@`KN!r@*UТE$y\tIZo8&LJ,aӦM͛7[Wpx+u裵[FcKT7NO҉0:(Ac|K|M\HA'.}@IDATlx*8(`!(9+Wd(b[o߾].m˖-#7n*TOXXi.ȵ0@H})(Cr!F| f޼yԤI*Y$}w2f߾}L~@TNNNK(;t`4a/&A JPy(:yy{;Z? 
>{ /UxnNm+:2!`?ڽ^ 75&װ(1tPb= y 2A!\k|MO= #Vr̋/מ R޻w/ 2D7PHɒv]B<V"+)Y1X$賦<7i1%ݏEP{7RX}!V1I½)`V*G&}5$^svR~4sӸqc4iޚ N.]_~ǏSll,ըQCZҪe(<]5ÇKl 1x0`%rDƍ_Pҥeº '(6+V @{b=PsT~}  Xp#Z4s90t|,1l,F(oIy;P<ϟu/o☣=-ˆ ^}Gy;w<$ mUg;:::x6 8P.ݻw~ q;ꫯvab #VrL#גI?J+{c7ސn$C nڵ /z:tZn򖫯y+]?Оwدni[] :x^jtXwU @y5qQmPۈ"h.\hP0!,9\ a ["Spa  #dfW~|v&Vi på:Au+WjժmϰdswѶЄ.TqRTLvJnE"E= f SQ7d.PUZڥ ej7k|` *|k(73ׯ_.XDn0[ZchfqVdZ9-xhB,@,֋\-^Pt`*ӍylbnGӑQcV^"i!+:FX,87{#F ʆ34k=B~$s ?Sb2Ob9р&q 0@%bZWfT+97qxY.]T2`pTNF\]fRM6x<߬¥iNFq Y{[(:?@ś5#reӯK  yj"n 6Pѵ^37eE&Fc֭4h t*T0%tX(<֭RX.0#`cCo h AFd^}2*/d8O8Q2!wޑYULlH$5nvmt7[O_F#r<I#P x1YBF8q3ZMZ/L_oCaX`u?Yԁ"~EHR<9XFVrYF4g n":61m a0ap%x/}17[ODȷZU*Rp̹HhǑE^ cWsw¨"kf;r3/!!A&"p#i򃸫zI7~QӯK0֏+9?!Va>fp(Q9R݁6@X%m^}D!0rKE4_9x P?GnڴI=lӱ8 r}9rDR !8{DĹݬ`K2%ߥQ,g^7?BsBnt.)#R fA#8*c,BJL-hɒ%ƌtUL2r t,-"Ҷ8Ykdn\@9Vo  wįJ o])\V(^$&YC/;bpI찘Hyߣ")Ӳ Mـxi 6) MךՄ-~D?ls#p^$ߥX$_E۶m j*7a޽!$+^83F!3m!{"##/7H`Ic:ȣƒɧJeSr3[Sֱ=)Nd7) W&E棖²`Hs hT;`믿Ng84RˡXg-;HY1uZ`L+9&ꪉ3g$PAرrŞ)+nVB:tް[b襻΋FV [5rqV3yAB ˝Ço~Z5%UH[l>I\|E[k׮lp_?\$ `{WBU+CBXɱAVBBBdnPC|j\wW"ΪZ`}ҥe}9 %ą iӦXK..xGXEYɱaB'L֯_O*T>@p·,i%% "~ի[IE}[=ۺ*7Uh*P*Ѱ*~0TXnԹz ٻ⸥P g郶ipaϝ)N(8H B<7xRKX͚56oL",;Ç'wwey#bXQi{ɓ'ȐI-9aYLH VfONL۹YCiS7&e oႚ{ӗ"P>& yUFBstڅ5|VLX8;qBz1o޳>,_8?H'{״dX j&0f!-gƾ!mVX6di)WNd8Fܧ4`%'Mx]/$a1lҊz] H,I WVs4h/]{A_tCF'ȸM~tQJ.)OBDB@X؃R("^D Yq֜ Ѭ>6_pqcaToDʀK' @QF 7ئ`%ڇfg(76m<pEbm@,0o<‹ 6pQbZ3ui?TȃfKeR' a@Ӆ0*BɁ RkeCqqq2F%KȜk6}*N6Mȑ#f0VrT;4lhӺukI.I3 ˴gh5T}Jqn4&{O>!XDm!* Q2!88ך7+9? 'J7J*I XmXdch@;=j( W !*UPZhμ ">O{E"9gNxD).yNL*4.K )}l-q$p' ըu#H> rXr01c?2"O?$]*OXزnY؅W_}%-*&ZH37ILN* BZp![ö5W j^z穢`P κS7=焥.qwaӠeL;Xҽp:{; ?$RXGREa;ɜ EhOnTF$,-S@!+j˂ :DϾuK;-ȺR; 4+)9ihigϞ-,@с“/_ƟZǀo1Xɱ0VB~gz>L;8Kqq # PXo`ŁkȸqXxZqթ6u@8PXx#S-G(j^YO %#4:FljO,'<#b~Q(3I kʈe&ՎhqZ?{XZ~T+!ϋضmu֍:uDK.%~=ƚ`ٙ9sފDo6ZFLc&`MZ-VS@u 裏(&W}-RlڴINDOԱj1X=SYC7$ Ku$#Wj⳴{ ʖ-+7]vbWE5 'L IUgƑ|v!(8Z{njժa%y¢% (o>^3po+!h *gY }aA\ ABAŻ-I&WG2tm{㤥+m,5Oz!LV]Z5LsRBe{5nXZxxcÝc%'Oz"b) gϞil+ȋ4}tݻ7Zc9ݲeK˵sNWHJǙ[R8',="^DT~pA{Gxu<"YITs8>QQp5CP t2Z/PU+I P$::)"'ZΒ ~ 5XYYr5r!V "-; q)FJZn˖-<((>iAB-F 9ׯ_>}Е+W$^,8t\Qݳg5oTզ[OPXFҁ0 (A7h̏ߑwa?䖖O!I)|ݜEp~2eҼ>\+@iM(իW~ ZUb 9 8S0Xz$0aaxn__Y^{5x:2eʨ,"бcGҁTׅsQժU I+T`>vl޼YƉb(>}ZZt@r!ne:+#)$m׮;dvf/ʹۮ nh/ TVp=j}\\[2gɀIsm"FVegW\L5R/XXqvE`ѣ!mm"KA.]n۷ wܱM@YF,CA*M8Q>eaRC>0]X`ʕr"Ԑ}E$BbNW7|SZrnܸƮDlݺOHr,#,Ǐ[-?X JNFP2QPϽTR28X8j@I'VO8!I% kJ?x4'&ڒu҅ .,Ҽ84+u֕Q(}i6kr#HbZ9rtbYڶm+I<cF"YpG/"Xy2܏O?$!o߾Me9s&ef>ݚ@-!V`Rca̍ԩSOAZ٩ߜ]H͞=W3P&: dX `'rfԩSrL?H VrRC%k%@ ;ׯZtkG (]4שSڻSw%.[ZsYrwnH#JN60GF67dUHfVeV w gL?Aqqq1iܖp :pvv 69 ƪa ?P"eޭAAAi7$g`%'xoڴI2{n5ni&@T[&_V+F ZBB%cѮ];t8eʔ^͛%#(ɇlc<:!JN&<..~mԩP0A믿H: g\F96c =z4yxxZ\k׮<O$]r%-_\&t{DxtAX\Tܺu:ʭ2dګWJ'bpxbʟ?&jࢶ24åZ?䪴NPF#رcS/ҽ9?JZbqE] Y(V6F 50G۲e у:w,>D +$B#/gr1[G **?tR* *TFR},~CB YٺnNYrT?Xԅ*0Ҝ^]c888\`ȁ5h BN:@tx0@ :tdK39b+es!7@\0W9mA?TlY-7TT)K6%[)K _~)c@Ν;vɦGd7"&,@ZHnÇUVUY`_777w#F ?^J,S#`6o½ U-%,9gϞTjUϴշprŠ؜SZ}C/Rp]"JN:&R?0i# K.-iGBV0Cb®eIKpr`Z9so%,@Z]"]vgϦUi\epMt$۷_mRtm۷K-[ʕEdgaԀV,[hA+jYxEVZ9kKi3&ȏ(: &2ȭ[Z}0-9/M]~]*9ku7;W PT wGVp9_ |7ԬY3*8]AG,D4d}NdU@<X@0sXoVr8"I&2Vv^oJQKP 'Om۶i6GR19[jE;vhFHL 2c!fX/Nz@im%6<<[ҹ]h׮]4v!k7҄ d~#,B>P[dw4yd8pUh|4_ӧOY͛")x%vvvh"B~į*|LKM0E-+WNT߹C`ɒ%:$P駟R01bAG/yl%l2 0|\]]uo6˛Wȑ#̲zj]%tŊ?gx)j` "Ģ|EGG~ʕuQV<ݲet"H"2im̚5K'bju/b|L̰p#=ˤlٲZG*A@H% 3g_ƬK_rIyLJƍgztu ,,@fM4͙3I=z$4/f-tfW3?`*V}%3Sv)1߿?!DQ8rj섅](Ht}IF`?<>E*rOjg?B#1K%E(gNQ?6s>bed,@f*+ &'7=39!G˚ZDE tSN27[oEO1aK.- 7psNiB_y*T}Ȥ1oĬ"5ڴiCGݻpHc1kb)OH#] O >m۶5$( AJhh$2 }qa }JMx|Qɩ1*#(8AAA$RIO@ >#iAF" RO'e>:r.:cx0F@GDVwC3:1bҟ3W\ÚNXvӼHXXN$!N 11Q'ht!:ī)n:jCYȌ3t͛7Ϝo"|_Pvtݺuԭ[W',xbNP82։"wgܹC@Y'tBa}=:1ԉBPt",<úp@,y|5Q 9Hˠx/i?Y`M>-'e,SB].]+Ջ zX6b@ qF L]u=zbaԎ2ٻsڛ]99|p3wa( !" 
t͚5rh17o86ʔ)}]vC6ˉAEN)?u ڵK䊉}s\DDyE@ƂsE0+5%GX'<˗/P'J]ݻ'+N "X*UTJʠ+mS7s%R㣌pyUvPHX^L䠮;vvlٲ?YuLD&z0YA@*ɠUR,]]" ?(ڣH6<⑾HL)""B#1#2#ܘ J-Z~²]M)|6 [~!N_RܯI_8qdct"DSbE0W b[b(e*i$IHG9-L{s- l M,"y`{ o  [(HM'y߫V*O }7١@A!UGAdK<Ҧ݂K"ʹsmfG**X" X+'D۩t"1Ect֛7o3UN8ԡbw_`&P*wZmP`Cx|o0,(OZ( K"9ñoOY}n FliUXyzzJMwoq;d18o 7 `Ò4UVLbe" [d[NJ&)iV5g#²#<BA 60JL(4:^'Çu"J'+y^WmPdByЁ 8E}L|vwwqʃ55 PHt"Od9۷N,8+<.\t`quD'&)Quº1C㠅e4(A :PpT/)Q?XDBUy}ɡ N,r/#鄋Q9QBb:0fDNDߐ>G`;p%}~0֨QC_ijB`F.F &+W⟁P*H7iӦ1d>>Iڅ`l`+O9yVV 5$ PさeDbbX"\m(̈]XVP7VVq]Xúw.)A[-XΈ] V"\߰(q>8_Ǒrx }L"#ګKi;h7,dG;`CP+/_^ "F%KqkJ!܇\ؠASVuxhhRCU'ՒkE,@v@٩e,V,B/,r(](9uͬ^:/>(3yHl\IXrG-pOIGӳd<>ELa%%Eh3OVw @f-GZm?^d'a>s' \ YW^z}U$'|B" ]­LRRt5IQ ek&#\WH~\ 8'%Tm1端"]dB]=hBKHOW(9ɎOF s]Æ el7s5piF`^EdIK4DW/e,PLfZXCis"dfy1UpAH`K~op25x|L*B餺vqR /B%yhK&D`ȑAds&b̎h _@X9-# ĩ1sT:X%㈧Q;5*is(yPSt("qCA/G[2SS҂]Frì N(Vy1OpăbkEFX"BѢE3f$drO洂Nd@?ZUpGƍ#dWbTH} -M4I V(9G8d W/X-"]P ;Cg͆,W'Š^k[d ˅x=b}hB-S;|Tn]y&4:v}Gʕ+ğ"o/+G&JmJr'N硥D[E>!I ji-[lbT,XgQ%wWɾd.͚5K(r&' &C@$dG,9Av)?A`ر2gܘY@ H. +-۩S'mҥFI,_UL2EZJk?~E@_`ͱbL/Ba$6LiX-Ç'o,H =zPbh̙icD@JVz-7nܘ Ē/r++.թSG1/F@$%ALǏq!+>EK2jՒxaE]JP O]d;;;gwaA埝\3!Z%AxoذA<\ ~ɤcƌ^s70YF[[[r4hV\VPQǷ_2E 5#kI/竹66U*9/d^q[Ӵ6l $7oi+F 00Pb,-9J[IӵkWHߖGwߥsI,˷[flrj %5cN9v오 ,@v¼m6~,w…W^ZFn-tDȝ5GEEe >}VR0=Xo!0`QRrBBBbjPaa˗ +ޠmԨQv",F"ߊEbɋ[ˊ)Vsz۷/3YJvmXs6oLW\Iv2TTI*Ŝc5RL`ukٲe̤Cm;΀?\3Dp59lQɒ_ɒ%eݻT,ݹsg*UPhFJ1,XԃjO?/}9Izn-dĉtE`d˫ZCnsϞ=[˗ܲK2tH*9sHbe?[,iѢEn5bxbMV %&o~']`4#ǏShZ˕+g+qՌ@ N5Zr֭5o„ қAߖC`С AbaB ,kir1]_A[Z5jݺ5 x oexCYF]vIlTǧ2@˗huھ};i-0%e֬YS۶mAB5Òm-͂|8uլYSz%K6 iQK!CS +@`ʔ)dܹXxrC'OUlk( ji]l,E_-+շVhQ%gƌrDcn+7Pr&O,F}= lҤ {:P~ҥKfW6uԑw/\V^M(a<Sr* Z_7hHp 4n=!pU4vX,9W`TlY閇,CGhhW`  [jH(9ڷaÆRѱv9?HOyDXHH.]XCwEɱfKܸ7m$c   yfβ jb8Pb< z$y2jZ g}FE^F (lag5hNȢngg'-:lI~*J} JNJ,XWrH Z_W/^ܲ[ d]㏭OF?ꫯ2[(C;vHpI<aaBtђ2r|䨒/_M6%deaL}$8hWk*F$&&\0Nnnno7rISv(""r+tV@IDATҲjp3@…e_3Q%gҤIt bj_s Ջo)Y8fߚ{@wҘ1c-Zr*[nQ)**J99Ad;,@Ztޝ6l@|X2P$I[.M>_Wuᢆ<J~sG3,B$$$hţu. 
ˡCpwhJ0SJ9 ;u_itm=}"5Z($KhJKLCSj<=IgPPO:R!Q13q8tM`5&ahV9jC(a=ѥB"P^= 4|I!̈́fj(fN[j՛uwf(*#4kX}""K]5aͫz4/טUVG8yL7OJ!?Yfȑ#`!A=<i3G҆ `˘1cn!+98p.t]%˕+h]*׭د\*a`*۰2z֯R^[gX*lR&7Euݪ&խ(nmE5|ejj*X(\dse;vM62e c^H0;wDVSÇ7G҆ `0$,, ?<^uɞ`q=cgϢgϞZLʕ+{xluo8U/"TW(tU*7@)R֜V ӹBT-0!Z|;}4ń.hT&XگH|lW1{Շv$$d]^ҳ$<CPVnnpGJ>&Id$'!Y},MZ*AA 4 *@*ePK)-Cq(ިĽЫ)ZUKA1,yڵkZ qqڒk XDzP<ظq#XM2rX5jmtX,xrr**JN(GDDh5tXKgҥhҤI'*{cQ)0̗c]šcWpJB||`x)A/Uד*չRh KoRX$ۜd HY:εktT(Iei>_WtU ^U!!o>w}vlrI9!`%֛ƍAb vWdX2֯__6BA{^bM6GmΓ̝;9?@llk.ͭBNU:-G57lׁrˋtlÁ pWJ {+_&6y,Cͭ"v W A}&de|1PY5eYOU*QhS= n ɪuf,^x/b],|wZz׮]ۺ"IoNӐs{ٲedi\Nگ_k׺:%%^^^%'o s=` .j&:¼5c}mki0!Gimq$Gt|êt(UQ>X +1]Ѽr 6 |gxGVN;̼~PVaQ8~OW!A`O*:?fvyĒS4Hz!R8rH\pA+]JesZ|O[ ٲFzj"UuS.i5Uj眦96p; ZUWVJG!0IULxs@ Ty|8/i_n~d6A޳ǒ9=e .^O0(1zk|c8URiΑ!кuk:tH' 9EITy?SlWL2E-gC.mڴ\M6K8!PU.rյ+W,ZC6t )=F6nfp4oH ިRnu۩.j ={bK3O|*qjJc+C/<0u5uRT;#<3J\q13ǙfQrO(UŎs!@ƌ@s5+veUȏnWO<]L%< h߿?X`ҕr1]}~7{2PVKO}S~{*WwHm'~ۈL oYƌo/ߍTReRLBCؿz}Z1Vu̅… q jڔv{AYJ.QF KÇXr?T,믿A$&+0H)>% ^+%BG`p&*;wFzuQM%k5kbh,LtgwXjhPa(ffM[d?4 6#9#дiSQ@-9\ >k='@V޽{^=C%!-|ZN6 Xr Saz7lk?e={K*V-hjU%CϖEU7>B"oV6cIlh^@}n@td8o8.9# JNθXro޽;8xB8wVXU6WҎ `/̟?_KˊBG@,9,+q:3vyncǮƠ".9[T\H; ̔bR1TM_{+BUִ^HJR֨uV GγV}<~egK0M%Γ8fRrhYtXq3V.˴i!+$8F٧OPil Xr Ual۶mytm.Clzyj19x)-3ˎ_+4k&8*T@;!޷4w~ʦȔTq:?톗4ŃN$nZ(wX.$ ud#Rr&NS&WoLXg˗k(]/"uXj AiYSrGw(A棎5u.;xm>D[d0q+š1AY\X1Ovu(">f͚#GZ9u`VK KIlzj$""/9&\?@TB F8u+ALiτc+Q^}o4&d!jTU1Sn=w[W-Y #mJʕߎmzw^ ]W^ŨQ\ %pfq+#pMgm# cX8qmVv9NwLwg}d:tCsSS#jlMZPSǬ(b:v|gJumkik=y𝱙\ <RrTWH0' ,@U X ΄iRL꺲pr6z|w{Ν(fΝ1cd5cPuvrssΉ=Ԫメ 8|/݃J;7D,pl]Yz©x{k>M[q>:pU6ʗ/ͧ+dY ,^XAw5!AުR 4h`f-AL:-Z@&Ml΋0 I <LM6G͚55iXq"¡=~pWxNʽi BjTLj:(N1YYl5LK\ʕ+ųEFFVS&n-Z$D`$]@bѰ\Ϝ9>2) f@֚?q1|Gh׮]Øwm#3#6:w1-5Zcõ,iX?`޳k ͼ\| RrJm6Qr,3.*-BKN)<)lK@sjҬ:>oTo ~3+z4yaq0U06;}A+urgKL5Z2lBE@J뗰B+< D@zaf-AU<1CB@HHZ-Ƿ_t=1eTրJnt"YRY80f6`Oq" .p@ȲHaJ/Ue]5k֠m۶ro;fͤk.ٍ @64wY)9|YwղH.Ν;""0}t-[Vb,4bɱhV:=LC(צrSU+,Hv@vq5.f<׳ BvXg^SJ :uJˋzĖDG{˒ K6AJUC&9AbqA|svmTjݲP !5E䶤B5V/79n>t|y*9ׯ_ѩS' t-M2111سg$p e_`xo{N(}$t^YPV7@H ί 909bhkI%!9h)*6ihIHpr<]v+ftW̉ fddH<9Al? |XyqfĒcߣ{j =z550DI9|1;dQʚɲYw *;4w XgTTrvܩATItYg8\[PBZ$uj"""h"N-KG [a~ڬ䏖EP+W`*[֌RP츎àϙInJN-ÉRرC,.5/,cqJwy kc Œcȣ L|;ՃZϢ]*gݑ (&B@pZ}V0pz8j+]0w OOO9Xv'OJDɱ1s hɑ{)R30tP &D@,9 MsIjEo p3n0ZP0T| :J+k3;ӝYD-W%Q,(Q'bǎKSk Âɛ6mѣ]+I/+]nn: +kj)i5wɧ 2u!*. T)=T7k2.YȲ߿_2 B9ZmNT-["/ 44z%.ѷXrwku-o;dj\O?owiÚWuڰߙ6ĒcUɡ>5j԰'ҋ 'nB3 @%gԨQpq^} jzHU˰_6uyC5' Zr]ÛsQԪU !A>X͉eK>|w-pŒcC}ҷa%K*{fU4fM}| YR*a̝˨V5ɆβAKY~4s`hɩS9\N뫀C!AmqĒcG.bx 'Il@*Ǵpdsd?jU\Mi`*`r>VrFKuF5W%Qr3֋(96+/W~w{+I&;R顙%;NiXfrB PEl9qFk`'em~TrĒӝ`}9>uJWpWoҥK~:իPN5kpEqUX%ʀ;NF[V-Yg3;?jʕz!A 7*%Nר DNXT,9FJ_L!-Jpwنi!㲷S 믿jY! 
\^ j?:ڔiSNbpP.k+`ᾳSu-6΅;jYfQ9w.?B;(FeJk/g#Zrn5W%G^ܖ5?s W‹N@ll,,X j6Q|rꪶnzWKprWCVrXjCK%Ukld\~M8qqWpc={UT/!A3gVzĈ,.:f!RrU x"@\9l8fBHګ1g"qWhcիWIh!#oO>R.BE@,9;N\ABJNݩb+JgNP dCߐ2)$Υ䈻>–Qɹv)ZH0'ĒcNT-k#˗{vMĒc?>V +jR#J p(2%*{/JH`zQuF.G%'::ZKkWA8\RE _>^,9ߥz%Gq\jȝNcÆ fÑK ϡʒo8r:"n(k/@~‰l s0.8 mJ]HJ/$/JMs*mY]{62逐m#`S\aJ) orO6+,U ǯ6u:k{Fdd$o /ϙ(9?#ZJ0a66l0q \N@XuiRR EJι(cM8 EEEIvN+ mJNLL֭Ѭue˖ԧO'ߎ; ϦMo>֭[ ן+&M{ /22qƁn_T\=-Z6=~1ܹa]լy/a}aAK-z{*utRjB/$O+o"fJ9\`UCٲ< 'qqH :$_J !)iYt[B%';YX~=}=swks nSrRR2{ B9%RJƍuk.< S7@vgA Tp<~c‰"bɒ%&'N`x9|ҰaC;_|EgwQI ?;=[&֭=#o־x>Ӡ{"qsY dxxNpЍO\ [y|gMƅ/_D[ Mᝈ?+󾉈7*4!7GX3uֲ]6S'ॗ^B:u0j($$o2e OTq )9ԘH%Jqddd ""j&Dߪ΃-B+V>:Bג&tjqE쓵Xڶm[(@ 5kģ>tqUK.ZM{xG#F]Ex'.s~]w]T;Y^8iGEQ,9EA2+뜀SH{x;Bo(;:+(3CMqwr[L!~ȾO?hcrA ojfwΧ| ի(<6%G䈒|1TpXʒS7oެhq T=fܶm[q6˵(/k=[*SV;&M\S'z[={@W>G/j{)^W(qiڅƲ ?K9E\G# ;çWUSə>Gd<RwNk^3\Ώ8O~Xb~hYG-RQ n&SX廹OCT9٢QF`,ɴi4)iJba􇐾 EWK^۪P@ƥ̛7׿vZ,]T?xNMWѻw۔Gb˖-cǎ;okjݺuXfRn 1_4ҽ4h F<"o7Xx e%6[֮utUcs1Z^̙3_V hGmJv hkҵ<=륩(a'uA/w_cp:_SLpC-jl[ w?xUs|$ F[hCS.C:)$ ۅ./#5w<si͜'v~ʼ`-~״ݢ|7>T #\Pr8{ݨçC7|xLj XWJe3f z-|ܥ) M4 /=DCE䩧Bnݲ~9Co߾vwh.ws >x a?W~[cpyϫS?D1/¿yxnӱ'jdm~olxdJQ?uK˗o{'RAhڴ5?>\-^j~a-8pє1/|3@`l Afݺfzl]c4hEర00V^}U흾{n}WR%ԭ[7Y.=>˕+Ck9O?Zj^/? JeGFh-`VZ3m&>4"*9\g>wQ>]t M&Л0aB^13V * GG}Z#-AV4-!gnIS+%Ԥ^ȻyLEǻg0=h X*|G MYM)IznvAw Cɠu0μ0hJ BkEi|\>kJIسeZ*TU'اFT| WfxZQ)(D9K/!pPR-"}eйZ_-βi ~Ίʰ,s1Į]jJ* 1 ̘1CPXl @DOapL\܀ lỌzpJ,|ns1s|SŇQ^=홟Ӝ +sIy8o;v,z꥝k=IėJ:4ҧYRA.>HDҴU KMĄtBB):qzꚌBF13.ZY' c1XT$UpWH}b5& 7D{%[K\Qd(/âGzy}%'/tXQp.sw^20RUv䋧rl6f$> FWրv}b?j @g2-,[VaUFcC껄rcNznjI@)QgNMv7RjzڿiꔡLF&"k)]iWNDN˸Nhщ%fj3=@2}W}L兖SNi >| \S*I2d+)چWg )9)(>f dz'Dӊ%%̵G"eo;eΝ#qV!fON4Y.|>x=F@>~哋>Lq u/>"^(=Wrx݌ɾ߸`ZV 滋n6iQrd,TM1ctH)\yW˴.1Ɔpl|jfTk;[ -3f@1H1[e5qE-[B7T<iD5~3/\PhKwNʐq' \$eCHCv+s]d"|̉-'tiњ\t=kd'ۚ_jN[b2ӥZN M[]@zl%.iÇ7$W-DEyqwZhFRSi,9X^-9KE r/% (Yϛ8c%ߑ7LKtF3= X;weTf'_8C5ׂ+{M;_yljѯ)'tc͝_ȥ++qT6e32ҵ{K,E)?(JN^Ҽ8s!SJ>0=odZ,xzX@능e?e+ղf)kWo{K?t}1G,Slk:ci,k]j7*"}5kLY{ J)bڒ㭪o30)͂\UOW6f313YuRXb:Ff:u*hJ' QϬ6iQf`!uE0m}Ĭh:QcTT(Wڙx5c_~Y;ݸ6paC6m?>y[yRȴ}ҤI~H<BYoOBXk~_sdYy߂9۔ @)E1I)}~n8` sNGHO>Ӣ#i1QNz ڗ&=1[)B>~6۽AKE]f#ڵ_&aտ2."-6|)d$nl3lnf>C)N1* N[CzB3 *goL}=uQϘT{y9ߟzKNDI1~ߏziԲ/kE'Ezt|V={V?M{s1G2Wm[LI%|9%” :OQy(9w!hvPuKìGÌ_y1%w 9]߫5}gz7n $lJeV$NiVfm\L2@ W@?S;L2uk/p+A@B*vKbIfu#\a"-^|x>F?:<$fhɛ[|x10ݙT/cyz8 *ڒ^Bh/=-`7mdxYK}%G /3>"h%)ӧ%eNƋK[z*Y@IDATy{rq8|k Ԃ}UU P9CpCPݟЦvNt3uĂi.Nʫji=_RVU37+V2sδОċTֵGP5 >B9Q5m*=Rbw@~eͽt .~  c3[wy"uYsEݵ^%%u>XXԫbuIU}*cO a[n(m`#"> Np Z@=ٜމT(Jbl+2DH/%ӥ+W}G݂ƕq-T]>BPҳ-YƒsǸ{ G#_K-lTVM@EJȶ$)X h\%_/OT{>{03ܼn=[}VsXPYhI=Cv1CwhMQr:zVbkr{ɔGKYns{az|\_Ox@DjS>7/-pLcJճwݿ497b5w.7-=YXW~'ܯy)ō %{THiPpSӑjgn\EU :FpA6/j߯]8[s`wunH~AN-E B-*c΂R [dET^ KAeUXjŻ|y#X*rr{+B@@_2rftDW75if2C;wuXAnbyaц^F1'|XsmYn騈Ays(1G"YXD2qbwC:V*d X˴W;ǧ_~NYΊyKb) c˫)Sᙁz !&N=hgӸ8-(2sًԵX9ZPcz > S쭂dLx_9:1 UQN> I:'Eqp~(9m `iKE`̞6`jX53&G;$3kZd(KhjB<a5?7CSqvSG=]`xlYf\39ڽ$+iY}䢁$NuoL,kBT4ꖯmg@ǏGZ. -6n TI!*cvHA !0c >ܢbeK uAʒ fZөJ\*]H( )qHQ@W 6\qRT pRJBaEɱ ҉(9r8Ǝ;t+(uNH32nQSjHc!A]Ȏ@bdR]{>M*zANLLw5KXr/JŠM`* ZqN[n̦&j qDԔZTA(9|HG@bRqFY (щIXB덤:Kх/`6Agf*{E!S 'k妲5R֜Vv5BU4!A $╝j3|}$ $٠I (9ڥ{C5!A^`j=z(|D@,9u+'X8]H($%cM%g $I?XsQv:)) 'ƌ0<2bɱѧ&-=2ZmzիY.p>^hbtu*tO*95kt` uQrocQrr\9s >>^qTPU UuL&[~*#F%‘]!wԯ*&$t-,/9SD2 0}t]%ҹtRhĒSh,~AaX|j)k̅!"؋0qTW.cLtQԫWwXEqءs,Eqr%n/e˖ Xrshq&ʻNLVXĜ9OA6S eSZz739:1ݢ6l88p9.%KDJ,8:/L87C q^!L2߀QJa5%'C 9! 
dS0 /ʳ3Ё 88#(93TͨRΈO?($R/t[v408e Z91ˆ @2{[9&p%g^:|}o)rrXQr,G\\`!۶m_L%'o|lutdX,9\שZoRG)w!A ;ϞՔ-$e4jQwXEqءs,Ʃ0{ `OЊS~}iƞ^@@,9ycCw6 m׾94UEH0E fpUIJU9li^hiɑx돚(9%{Xr\rVh;v(匀Xrr{|=K+;"3 F %.*uc]u 7ڝͫ9@]Ēc%dnBϞ=[S]m̉%6HjǃjoY<=p_ۚ>rȮxfl@&U 04 fsCEG,9MQrK(JK] ?(_9 5fdJXr4S61xB 0^ԡCxs=-+Kب=ܹ?fku%ǺxloTrB:--e1ӧOcժU7n0%NH?bp~TLuJ3wK>]H庘'ЊSLtYٹsft˔#(94Z+RllK!; SLAhh( ,"bɱ~D]mf_Q#12Ұ_6\ btZ7ea<;NP3kPh"k[XQrK)/7AxV֦ôPH]Yڡ,>]nmX*ʮ]VH'OB\T4^8ih*KjX j($$D ""µmEpE*lltUsssCӦM]_6c19 K㍝3q|^,^<3R%q{ 49iH#`F5*Xz;qڨR*k3|S^=:8'(97d09+p0m4áC:,,Xrc7v5a%ݳ,ލ;Y'8-[hȒ>v)JwY| 4uiYp!}Q;FX(.b).ֽVGǫu*vDѣ9A-N@zj*.l܀[B?U'ɔx?0]٬;8w*9rd1wQrz+jIkNʮm6-jHKN1 GM𾑎ww0%Ͳv$ߠӮ];±%DZϡ%ǡiMII.Yʂ%F?Ks[@(%<3JO08WND#.(qtJJK?&UpWnLLLĞ={Dɱ񨊒cp+Txƺ"9sBjx ԽXr mm0C?EDqqvsӱH hܤyĹ|=dL̪&JU+uOKŋ$jck 2aaa6D7b1og.8zԫ47""Ϙh6Xϕ;)m:y/݃k@?CNj˗Gj՜J.GFG1旖(9<ݻI?ƹb _-Gugbal5Bqf2q̑6"z4= Y0WO0KYxޑxۏ(9lٲZFQr\fm.(8Gm΋0`>Ēc>,m҃dOpF{HOPU+!l)^µCr?ZnD ǜi ХKg!e%!1 ,]ΪBƯ*VKmŒce-wmMq2n$7?/܆7GJ~F=%450fs Ow_#..e* ԊS{(9SZU̙3%#0uTc̘1oLZ+d߮H̰>G{ax{vC@C2T ? pmS o la`xۙkxx_s8Y ̧֭CRЬY3a}Dɱ>.ݣ(9.=V>##&M߯lֱtdqĒcqAje¼/Z*׬6"E()&-Bje0eEZ\Tk%ڴi32VW**9;vEaKjTgbz8qO=%y0c]mm`OohPh䉓ca( ~Y,xJߚVΐIK}>trOsյ05j@N|[qUAu7?…# J_wިWp$l  IiRVŀ|t1?ۣ1ά^ӧ döw'-DOxJy*01SVh,|/}nc6((˖-àAпL2sm5!!׮]sEP"pPNuD X?a>X)1/܂{`bS\Č3+`x7:8>>>hݺu2! JNˊ3?'NDݺuѷo_7.-|,@iO,z/"ňKN33鞎xWc^6 "2WX0J{a㋃P܇vCڙ '{i?GFJQܵvZ-Έʛ%cRPMB?~267ӞXrg,I`xnN\ௗ1Zzj仕tEr3Q0o;3i]P\=`<… ѫW/DEEz{8yժUR&`+Tpp(J+eo퍱cZs*jmIr`!1ըL?+wiVfRjqؠ(5ZLi]E={j4 eTWﰲ0W 8j±c\Jfн믿ƣ> ___w(=  ztܤRlWUN\TCƶX%;eJUqBC [ⴲRG#Uߨ'= ~d7c>p5͛5:q@-]`cG*Ԯ][,92V~CDD|I+*]YXqײJy~^ Fz7W̍$7b/_6 #b~L o LZ'݆l*Kދ+bY&:wYwҰӭ[7cA ۴(9EL/6b)6@|9r$*UQL%ǙF3wYU ºJL":w.NNnP!;_k⸪rinplK4/:<c1?K`]xݠUdҥs-n>==k֬Qrg,\Zr ~uY,,$w^-.98pR⌵v FPqNn18Q{ʲfJLwb4?dSL-wX_3f ~8Ym۶isQr,q%HEAJȑ#iFOqwyY" ۜ$Pk_v5BI{5׆"4#GfDaye~#plD؇oyJ̞A>+ȕhq06 ˯bwwwʼx'=K\̅0)@m pѦ(9O.-̮ՙCJ0AV~LWgE@,9:y|ꎆx`j,<=VhL,vrd!Ԣa ֟sɬ2VY{{ +ql,D?_SJ3gBJ >^Pe}jxZ|glRΞ{b @|mQ߿=A#D@l-9*MdX N7|b?#N!Q0&a*gkXMWb8wuBKF_ UuM.%iF+m;VϜQuK}o&Uo*I% xoN4}o&SiZ(իWc߾}cIUm/_~V* S@4"\}߯(7k̼KkN@rr2&N{ "FwbsefaZW53R A;p& \mX:BCg*:xB<8$݋{)VBꏺ*_WRR( mq Ĉ+җ\.v{w7$w޾}ݻ}3z]}~VM{PnV]Ԯ=~hԲ~e!zdj:G/N]؞@`tر#-[L D#"j ju֥.]&l7#!N; še7n3 ta+Ŭsp/UA@,9UAsآm{J`z4=]-KY#6骞☮ݴi ⍓x($FF0'L9J?yB-h|V]fЪ_Ҳ}'i~u#+3p@BޚX7jÇ7Z#]S5*; (9"@e tMT v\+w)+#PA8p???g3mfzntWfkV:WngE[k.vE J SF(~=1cr!lb"eKSIVKmˇvK)vz6M~[O* .F_ĥ'$$rΏЈYh8iii*"D8Vc#r :t?ÁwӧÇGuIA@,9$K<:32o#=ā71+rX [vl؃^=|fm>LmM;vDǐ?[E)}8uM9IIS(')r5{BK:vK/}9AU)7=XYpH֦˾oҥKu]x.7QꫯNwyofy:t(0+; йsgLCmR!Z3" Pɠ7loW`?g`[_kjV:7e2ric]KI\DAI~ǹ[BBUdx"+͡LcAaj eg9  ~ԯy]ںujX>u1fl8DJ؂3E}z좎lidU|IeˎEjJfKD dQr 9.(8DGG aÆyFB@ܴiVT I׽(%u-?́rG:# r{굢)z<&;A9yfr_?eqrĊ7be)FLEETOyϯ(|~x0uħEPԹq5_ciŊJ]T#!Y 6P{QXX g*((P/ {oйA8+% k Oa | TfDbB#;ӯ+v%L;+]T- ?e敚O3)1-(%;OfRA9gJ!z!?veE&כ"uWN 5 a SjCYJɤPV<2 uSgK&\^CFRR" Jq#ZUʋ!+V(yE>"=en +&߯٧~xgct4s% (+ [\jJt4{"eX0]VwI6S517nPvcٳg\7n\S,T4~t֍~U(5-Om'cB@,9I UzvTd:xP) p [iV4^<Β"j6%&Ib)ZDl1j̔#QKV l29θ/ kXtߟ~nvgtGY DɩXR@yٳT6EklܸQ1!&GijKgMM0z >N7W selCHjY5!?f`Bsl6`„mM"֛/݁Uu0{RJ@_?.jU%ۙ#еkWab~V> tMDḰBn_hp3s փt`91(?ZΆw97+Aބ<>L:_tt) )%"s:3Wy45 8u$xeѾ&+_D#e*c(75(7Pt '2|f:UI&B~c ?cA&x?<-Hhݺ`kL'e-OKe0 ކ .@ү^Ñ8t}ʊ# ~/O=6Ty_E7tcBdϞ=m6ꪫ*" (9Ol[hAVK@`ĉ+TĒ#/͛ ;!ЫW/c/B{oΘ1C߿e䄱%XᱭXrXq\hܤ <`]_EO.aU߿b|7իo-U QrTcKة]t}*Z Kũo4%C 0>>>Ծ}{Qr8bW/ 7@=}ʅ䄼1$rXc H+\M4i}Gĉ)%%hŕN< Li PKu~^.ΗWFStu"SO*cȝyW(88ƎK͚5{NOY +55U]U[뮻N%gVxB {~#wmGb$5. @2?ɕ~eCx@.pF;!* W*rssiɒ%oooFzYl*G@u3g̘uFP,ˠ\E$33S 5UVu09r{U;[n>}(7s%~t˰c→ P( &}UK;wTY7nrh^&MV^"E@כ7oVK.U iU=={63FYG|o~S=,:" 3<+Pb~@z)oާ?ց TH)$X^=}z>Cfߋ޸N:4}tyt'R0͎AY;"jܷp MP͚rAdbw? 
[binary data omitted: trailing bytes of a PNG image embedded in the glance-29.0.0 tarball; this member's tar/PaxHeader record and filename appear before this section]
[tar member (PaxHeader, mtime=1727867955.0): glance-29.0.0/doc/source/images/instance-life-1.png; binary PNG image contents omitted, not representable as text]
[tar member (PaxHeader, mtime=1727867955.0): glance-29.0.0/doc/source/images/instance-life-3.png; binary PNG image contents omitted, not representable as text; this member's data continues past the end of this section]
R(bC%>BG "b},(QQ$J(!~g>{IB)s{oZ{m$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$H"$ZbC$DI$4.LbnSSJDI$DH2H\^SSBO=A=tss3Q#b1DI${V@޻~׼^?5_4YZ=7M`Mm<ߏ-{uk/mo2jHMM=-P]>g?aNR566N8틀N`gonUc {8<H̦N _<BYg?^\x{Eoob؊ǹ*ۆo7*xwdo;㷿mg(`p5s =&Y@j7yÀ6ӡ+h2r/ p4xȊmi;'SdlCCO\Y6NAJbeH f>mNrz{{/IRRԗFҿ < ;>ZI` 5ǝ:?x([=|Zw+nC1̮ &`#L~~bΙ^ЯK{nV 8ܟZ<kQ7eU79Y?˃ ֺqUӊ_Պ2۬}0HX[@_6`?`>/'bӦ۞2)͵ߏbn2P\!3叾eݿ'hڮX,~ H]>mީ*/6!~>j,R}_?zVsG~֢fP%暕s?4Gf)`}}Bͣo_ëƥrs__m<e">0|7<)Ŵ5R?\1}'Ӌ %xvY0ۦV9gр5w Bq. \0`˭L+pA3ݖG0cղɰ\'`4i[9ؾiƊց s+Հc gM7nCIHvxSGg焉ƩcK,SO=O~w~_XGGǧɶM~|[+biqK+W3/Hq =//?n?KP巟iFs4fU Z-Ək%M엷1GxCC=?"XO|#[ۃx9`Τ<`gJ/<2d/=v_/Uzkƃs ٿ}@0񶷽FQ|?K~tQ'x6.6{-khc`Q3Vfm׽}?F*23\766))ĜkzuO yۧo~xy^6!gS vuocQ311~D~$ m\X? k_Ngj*+)ڍm{8~,4VuaH=?٤(€B[ahuyNt̛.d29_JBÆ7\wRSS<+Bb ,Kolj>Y8( L >0 K,Wa?*1_qI"uSWhS*Tgl3чW[LǏec?nt-!f_܆ۂLԃJba{8X<&?0಴b&Ԍ$-Q@\KΒ޵UoJcnq?0+oq#'IYhAkV4S~ Mq3Nv.90X#R2ߞyQ(ndѺG~ ]e2p/^=N&455}+.T^xŜJI*۱A_Ou\.n Tgdž)pۧ29M"&4H29%[dce#YXpq1۷_Ʃ)[Q}[P 3eO$qqɧ?&.G;k&S|CԟR=[W;?C#=k%8\cޜL)#8?5ڳGe9 Y`MVP} g|w=HUʓ92*GAq#F'E<7=Ι| zB3y4 sT*_b  .C?6hT[Ζ/%@D` m6o|tfOnE)vkZKg1곩qB=ԕhɣ)ZA,&hLiNǐREx_yWnEk-eVo-P?ӡa{-AHkq+$}/W "Ҳ&!`I ÓL,KVhg-BO+I>5~sWڀ团׽LZfM&bg?%.I x6ժW:>(XY`.[`TӛWu鯒tVV\ ZZ[,KkZ S\>ty龏?JSxfт#Vw<G:u VΗ2+Um iJClmf-@KRM4zǍiI6HmvVMKkwvTE)n_ |>C H[pׯ|,ֽu]B)jg毼!BȦUW`^z %oV0!`X="\;jbŘ5u$4)Z1t)DhdvfY=G_YhAoV%eh589 6P{*}QaأthliZX[x߽WrFa#f0e?֎!UNUKpڛ B-w儉{gq [ֿ :6-E`'˟nF3vlS4¬*CZ3>`hSt,v1m=x Z6 Mj6u(IRq W^W,u4^&_3C@@&W8b@NBW>)*-hͳ7O]*RޜKA)J Lk]uIa ׉B^A5LFJ{6Xp [w{:BbYgRE ͿSKo-mC 8 |B 7M<岩ͅ-^<U̔/u ̫<"><Dp}] tKyuqL:2.D:Q=W=Ui3u͓ hQHyPY7ڣL)gVhEsE\}f\H]~A|-UmY_A܉SNuјo?am*ZD8RGhX2`ަoXsƶѢ%eοcϢH!+qH@5$trHzw%Ht2[/oZt]1Q+2+x6 κ PT.Uhg X(9B39˵ 劒󩤰,W+ĊN\察ޑ&3i_~1(-p _9 @,aŬ+`]!F LeٴX[ +}2gpb:t/?|qݑI98LWjy3|3^/H),+MT')' | ^T,[XgI6x%*ؕc4!0O[. ټˊ~zc:g?rBpw{}> VGMکiʗS:(J-hu%Tж(esWme]2r*NsL8= zTGkNV.\''ޛi-}>n1:$>-)_XSXƐ܀&e%v+݁UU,˴6KP%\Bbslg8*ؒgCS>.6pi6mfdW ؗ]9e f 'btxѵ v=1޺fkָ\Vxr A|O֍/f:7!d6)N W]WE}Dxe H:ERvlƥK`ZX%hKG TK>}:f+ [3+%rt8b -MWT*sj*܁*pV&ZrWԶl)ٟ 簱'e{&rOb +io[ !3Bv/KbuymO:U134~6gz2)K蟝  |(i)uǮaYfe!a=CsV1@)U7-MRvY 9Az%}D5veNyWЖ+  \l arMsu5V?^)̛3ݾ6ӹN( }%#eW2AAK ܰcW6â"Џπm"Ѣ_WDkv̙>j4C>Xh;fhAQ (4R<XR5$@h T|:8 S&1F ߥ`O-кdMj47Lv[kGnrI˽>ͪb)3$r^@meeYhh8JB#ZB TI/.`Xy b4waց‰1A4tN9 pׇI IDATB}吥 `V'GRhk@SRGC tRϲ,yɈYYn$|iUl߼V&R7I&kIkvs ,*3ݭX "Hm`.p@REbMڸ/K>f*S(~T *;:2$]cn]M%K՗CCᨁ **5qrfpt`>~IƲmC8{ҩա?6FufU4ԔUY0e*_* S =R}YW>Ľ?̷vS&\VyEz:e5uC2ɚz-e,Nn;H )7J.۞ڣ}pU'Tn?lXN9j6q9PI kieu (àt% )pW!(lYץí󄡌EjDaZZ$xׄԸ 2*Iuf1dMlEi  '(+ %j35c"w}*7X//W ;>=NkM65]uOֻT]pbZ h]UeuwK(pxJJmgЩ#ܘ A_UR6P%L V~eĸ1hIm,(DuT* gg%YhKVMǂ‰? ץ'2v,1+*2&N%*rDYB(Vh쉾Hp%?.V\tL)) EןGcCɗW~̣OoǞ|^Ɔm:WIJ^1&d '{qXBBWvo$N7EQ ?p恣#8dp7 GPVyTUr@OwT : x!*(g0Op -=YwɤoX-X \ \ԃov%sLBʞ:OQ5uB*aiX  IץSD`g\-޴=7׵Kk)CkRۗoQ:RΙN>qѹHlV)OyY!LgVWpYZhQZ'p 9HSCHir"[hUUUV|(0l||ҀATWfųRM -y{f.UoXS=0g,()O+:O^)?{m*@`w!k2k{J#q-~j%Jyܹ}JӤV ?.9j6W\=^zeOs1[Wƺgx.ՕEWw5٢V}kرz?>ޙ2Qm]ʪR/Ɂ)|r=ϭjw>sKIdNEUSP }B ^~ULLCJ_i% Y#;|fu<qGexU;8w4oศeXaTB.;L̚*[pyxJ[Uŀ#*> +e;7Ҁ-w &+w ż‰1*A0 W7/SRL-j^A cha.$gtF>HW9ĶqaƧo V_T/--77_w,x p쑾&A8?=wyֶ ׯg \z,+7\K0`0,jG>r晳9d2"fo|POf`)zja0逃pE0 bك|IdUjixIUeHPU5 P u{(h%Ӓz)޹fтջ=9vSOBJ锬Mj7fHҊ^eiGHgik)d!Ì@rv^]9ָ,V-_kO/֬QhπQ2 7˜wN< BZ*syvi_/?_dxbXh;ʷT\5ێRj=C)Œ%KO1!+?{;D(c̔cXY2<~?xG0qh6nl34`R:'#cBx.Ku}?pɧ>K |5uCZ[=?d'HV;N+euCy@yx毿-߿yJF7q?rh 93>֙vbx 18PX sбeu2fTa0SU>p[ џ%O?AR%TRJmZi:r. M:9g>쒏0xH)ZL&7 ;O`QZU˖W(7^V> ,RhѐN0&B( '_C<&ǀT DIs@:()!&7$cOYPT[Ɣc Ik#K)4n6eR) Kw/zVٻ*k*|WJqㆍ,_F7F(}3QwĴi G7蠦1 ֭0\.GMMM`Beq*D8UGϮ\%x R K/zR/r9 o!K$Kn@+#HMט*|7/=˟ggkK)RKBı{~7^V5\#;? 
:4q3qbX)$Ŵ"YF#xKJX+I%z9> dfGHᄆ Kk2WeqeSak+zI= LgxMP(T ZPW !B]%u]ƓO=È ;*y|͘#Gr]uBoc I'' t}cccO/ݮ sϝ4cƌ<p])%x)e{tƍϕ}ߕ>n#2+r,SO"Ʉ@e_{L$Xj:t,hvvͺV3|~y~ܗqOJmaGȈQ㩩Iq_ G{J%x_/ȯ_{=bnWߨPoxyz2FYfiJ~ aP(0ml{ٿ~~s{ bGE*W,@rPtQ.Ry$6 /+9+0a_C㙥Ӧdԩ Hr FY}xVְzNU thw}bi}1">tfu9䨣.AWE Wcᥒ%&qBEV%AQVHD;b0J)<Ϸv',OIa*+vn?-RG72CʞU_o)H$/TˬR<7Ip m֦8Йxhw߻^R7e6mWwO?ǁӧLf_I3=P 5݅4Æ "]mFozJ)èauzcʹpꌃ4Ƅ?3$!hhOylY!lIٕ͚1,뻰fꞁtjuU*3뒮km9W޹0잂W=UޗP qf,[ۙm; >| 'dDpJl8^dkZðT8*mJBE. 6!0~x>|hs(/e[] ]=2Lzd2s̘yDȸ3888DZZZ1j8^~oo.[Ww+ygZ߸ -˖}8Nh}fՔΊz\Q2J_ 1`]rQ>\DC*G+2&g5JVeX%FǕ.n2@b>q@!3j(>H$p6a6VE*bcoO'<\.WDAmÌG91cgj7yƍSj+J*+&kl"u]5b3+r09,U.gm0uTl{pxG*D.rV5~5Ǝ[fvQV>ޒ$R__>/ԑh&R.99ϰéo8]s¯rݿ .Kߕl .+vPWWJA(f,i>cwF &~)hrIX,WkU|rP(=8 taF:[T,e#*5:e |>dRn ܀Piq_4cnUڲʠ.@`䨑enJھJςT*EX$J| v)X͝;r d$I){`tRjjjr(ʇlbR) \e׬wxqXAIRCO퇮HdYR 9#8hixnbI&;FG2y"7t7Ѫ2fP*3y,lp ?Tu@y0ڢ|.AG yECHI<1@osOWWWkkkbl]zXUV,Ve~9m JB㵽Rfa ~'H\|M+9r XoP(o~{@]m]m&]T>Xf_HQdTgRMOcAp۾Ϡ Az 8urTJwmhgؔa(D"ZH}ϯX,)D">?d2JfDji ӽZ{@'RHnl?@ìN6ߎAi%Xi/뺡rɓ'2k꠨1M$em&U)EYj4l>C8 H1֮uf!`It0תZ6%DX yFE2Ԕ%Sr2. ZP';>VbJ*̨fw|PI)cDb.aVR닙LƱѾOY*us%i_%haVZ ef:::XT# ۶l) ˲(Yq]=UZm7UFj_jgzaL w@yӦآtww>T*U6n.P2|[N()"^)EUPy_z k|v9q\@K\%%M)"V(3Bؠ.]ϥuF6m'?՟>b1ڶ@e2Ϊ4=Mhߺ]Oirw̋s[l!ɐH$Bo.ۂPBD p l@JH$BL( {A%H6՟fǪxkmm7|~vP'˕dPEү<#>H߁\ Ī7#G$$" ~+Of2ڶ¸Ҏ1ks~؉ňǜ2ݿXO@@;5hK(Ų UfÆ $Idƞ' IDAT[ark[Y(ƈU*-oӦMtwweJoDcc#|ϻ"N=܀/SZબjR ud2[JE ղXe~@W*[Wߛ+jRBkiii!˕YMTVJΒ+JSĸL1R -5555L4) fUyО_[Ja2t8KqW{rytСC>|8BBP=VAY6_jg(qAkגNIReRKJb6hU6S,H$5!x(Oy&ʟx-Ň= 1!Ruq,F0VS-QP%Q^-%|W0j=D Jrg_m]fQaÆ1vPW3*nxzSX,+RICK+Ud]X1gfhأ6nAa}ʘdI!^T``WBK1c4AJyPUVYAmvZ3[p˧_1^ !f 7#Vu?9+zuEr@Қ\6ҡ,_В7l+VLE#lw}[ᄆ62:.n$Э‰FHN9a;8Rb~2cĚ[h.UW/%)En8zqfl6q2]cO~a^3O:qfpB:>Py}Tl_MOƒW{jӦ쒀a ~V64ʭK)=)+t)z>N߸3u؁EÆ15;a&s)dBzV[U9*ON)yGi/uϭyƕ繸g>+v_ RJ_'E?1*eNWeL ?`|( 2<߮ nxʥ=bXlkg \/B/_E]~9stN8G1RGNM+;_6SZWh&_,]:f4 :|5{(VJz @͔!~@s%:<|M1s~ TǷ8æ /S䋚\QQq=EQU3DXƲ=)1Or(0Hh U8g\~<^RayR"R~1i<11άq $W~ϫ EW~tWiŦNN< p p/{1L=!LԸq"v ݃%f;ˏE:8p[sRفհ.OO TيSJcTfسm:8ĵ@jS_SFs'!&Vc@Һ9du8pأ$'6[l>Q,)d3p~}Q/ZMUu g6嫶зg3~o:KZ; {` t,S͘fIMM$Km蘛F6iM$erSTkt1")8Yu@oH00?Pa8Ė#Ȫe:ZҦmKXmI><#:!9Hx/<5IS RL')iSCIHTj C.э*"TRdNVnyJq2KDGjJ2w]u)=vҽ+{{{|2|#Xn/fm{aƜߖ3ƋT 1զ-EHw*/>zIi!,0ðNb ͊[Ya:ۆsޚ9&)M` )%dt3``>%8b2)t1b *!L)6lv-hٺ!ÃBI--w%pE/,?d KB'N]R!U5%&N MՏQ jE&?oT;0hS^Z,EQ% v[i ;1uw2IHP+4̼ ӵSrs2Ǖ~S!8K{6l*xwѹCp zoZDIinwC`ޥG]DJUSRZ͆)̤AYeu T`60ðndUw QȪ@orٙ@Hvh j'L|&M 5HR!f?QRG'*2*JQ֟@q_^RT+ _M ?,yEVĵM ꝫhP<8ehLjWհJ'H)?"*'"h ul,^?쫖Ȉ%;thMuGdap +7z3:N9?] |!Τk̚ϋݷ^i' Zx>|:Mvc\p n{mAfZΔslx<28X}g/qbܸð.>߲z!4.R!sNƗFT @B5 tUh@O@t- n" d{bs ZpHbr"v)(lF#e5$"0X$T Z}TF*t?bSNH,7.G,D4k,HdLWxݯ ;XXs/̐}xon|vy+9~^B8y{j쬴@%€]򢓐%PE{ݽ=[vTX ˪*7msIˁ$9!XXH')I ~XFTJt?A3dIgO>p oii2bT);//l~)`A(_ˉ"8th 3+J5PTY=KC=XPeۖZZUVUuJJ(.[JU5^:g-.絷'y4aSUS $G.XkS^QÕc\O[spdk+P PTYœv9JBEQ9)4S_\GK:R(*$&؂9S&UDi4#'ASif,̟[JjYdE.X(I:1iJP!~%rI%H Nc8.}3D@~(Ȓ!ťEz⯓V#YMJPU5q_Jm1a(_ TrT*tؖe+7Yt-={tهo+#)jZ8.=%Ԕ$/X%qFԽg-=y.xUj횬U)P/W{tr2*p;aHJ T])AGeҿsr` ELTF(6NCvVb}a9˶pL榾W!!nKk0<4t]2:%TF&S0ݖI]HitV:R4ӟv,{,+U3e4xd$ZgIu`I(hu -g'q{pn|Jۜ k$id*5j`QrL|j^7*onn#s"I2Y 2;U?~FfK-5yixT矕tvF?]&1NqeCА jm"8opfR=ڥLТ6,'i*4`K)gd5)Qy*vTU<0;آR8ns®*ɽ_yleEG M"_#N$u9uҿ (nCd֊+:gf':'ݑL= vזEߺ"x Z̀5u <[*<9ٙuӶ]/Y(.E'7ess1|@dbϞ=sLB>}\}n? 
KW6D!f@(zU$f:ѫt+w?>N@Qh !)^Zӵ]Ij=.%l g޿`?Wۮ${">ʺu27z $S/b rj#`t|3WC6VO0Ko;Y>O7?~sׯYn.Pڅ嶐w0[QC#ծ]e|Y *{Yx_#I~ʪj4 0jSB4;蜓DM^]q3}~@,P~gjZ,@RfμHV2/?&\g=:uM" H]tqu"˞EŜ3^'aI%|ZRDcbZQ9h7>dz)W^2<@V vor9/]&I@U)Sg\xiKp9:d -=*O}}=v>9GsK韗찺KXlx.Iv<p?s^Hx}*wQZa^g&锗D#VQOMV;WTv uEKkS 2?$_dhha]lY<$GGZ <قDuhAI-g/)>m }eKngymUr3(N7,ʡ1Z Yɲg^|ۊ*x,-bd߹v3IL/}KVIK"z[Emޙ𳲻LʚUVV6 Xcu#Bd+[RNIK_v \`W-ni$3AbB0P)xwTV<էOFVJ:׷h$JrzqBH`w0/3ɢ)SS篣J;dN)$ŞlSUe%pѬg+)6F+/DG"(4z3lY U?k6tpĢR>l 3~ygΧu^VlduU}bj΂5mE~eT*m2:$hY\vFOU<~j.`pZ[3q=װh+ M*YIF?}FÇtr "6ICscOvImb~ZoEpERX"CIIEkp0#3Yde"i'7 +<| !p:&}o:}{u dб"ght8Us?8dj6Ge rzzQX8INY-)]2$۲:wicEفhPT=loNKeyd+u~gGST׿Tr*lj2( \G6z ]?-+f>SLzˠdu"ۙCJ(j|u۳ k嫍>j&fϹP)\IDATܪ84}itH%gnlg{Mz/zщ_1O8+HM W~5r~ ?}0'픒?<ں~VgY&$WI)L(If9r\y͑W~jOzvͥmv i (BOGUuK)K^7{/'x*VġIjFL: %yFr2vHm5yr=Iw E2Uߐ$Hm% % ?e ]$}]PJUlӍcihs- W({i'V/=s'm[hf~:/ɥEFzjE<63g/`ي|;eԒs;d&p8$Izv-NivM>EW= I#v{VO:r<`yaS^٣ȦXdLq}ɮ؝EY.Tl]S[|mݮ; u *C4i|1ʪBZR|ƣ5$JjE8dVS`}ZAZ\M{COcc X8Op'o׏?_O]ć_Q*UUUwU-ˎTWR~eө*>UTU}S]PgI&GU!xP,-+Ԁ&x_:GS## u][>"R{|w8kXm 6=z4t@J|eky~tGu$ٙ!/Zw`ГJV=O/YFV㯫x_E%rFF܊QhsYh2*\ѬG{G@JH$\2˷?5U;v"GD6! 8X2_[ Q0n&<5w4*j}GB0!5%/NFHAf9\r锖U]ZF玭fUi63N=+/NFz*-\oN䒿Mͯ+ڲx]lFmr8c9c m=dqYUT1aB&0_eWj,hQV,QD2ǚ-X2Y}(Xi)|f҉50ޠ h>/!Άm*T=2Mp') 76X/40=Cve$},}esNkm_3gJ>yQ .Yk{EV ^~)~=Oihr>}'Ҵ" ez.'7OExRN)˱odܒ?~]XVXdKKK]XUU= Xd5!m#bUfy&%`a*qڀo1H[S$TU8UfxDZ7*^wx6sn\>{}+Y4iZf~HY@NMMT]]s0jÎ=F+Ou8%)kоʪXaWu=Na75- AIDU]1V76ϖԼ>Q/!mr\9% HV[E ɥwEbaѤ"( ƨag:*#VSt8*VĞ!;! {bbl'w2y91]#JNw#*)oSZ*9:(‡-XVu$feSS|kRJKDu -O@YY٩$1cdq7*^kDUum_|I?,RUUK<<TZB3pTmj dz$D tHII-55):U[۳3v%7+=TSx}LOYֺ%~aR ,X\7711^oCԇF_~eVy>@MNN^S_n?)= -`hA+I-yyySur8T@MIIYl* ad-Ċ) ittT HLL|zG,FFIHRЪ2BO 3Zآh.Ō:\ke '};Uh8vpڗ r3Nh|<AEzmb_/^, hl=r*`P+Ķ.bd_%¨Y5hVNjb۱VS Tgs*w>B,GF0LjsR(;Nh,XpĢpE@ (&KE |1F"Ul5l57͙l<`Μ 9#ꘞmþ@a_!pRd-(G%3 y180>*Zqh,;,X8RqM0_LuC3ĵCh9*8oAJ4'Uhѡ*Zwd>?`dQh!ɟ,OGF- EKќ—{T֟CV_f6/B0_ˡg`f=BPno`<85YF9ð:l;^_Zќ'|Y"Qg3y]k*HF'hAyGU|7V48^lXE @Xij~BPg]fŠVJ8w0 mbd3>h&8cG Sz, /~t4)B+DKo 6l jEV8lLv$aoD5ѓY#[mM4p$37 VEK0¼j~{Ϫq3&|C`"+ GEl~k|UN8K&X77% "h%|hwٷj%/lq|}|f4׷͏6z,V.{т-[/7:ZhNb]!Xq_qyEqzϰ4&z͌} xRO3*h~#E<ގ!@"j VfAۂ|h~,&iV7p8A^D:h~21r Keystone Folder 2 API Glance Folder 3 REST API Glance DB Database Abstraction Layer Glance Domain Controller Auth Notifier Policy Quota Location DB AuthZ Middleware Registry Layer Glance Store Folder 4 Glance Store Drivers AuthN Supported Storages Folder 5 Swift Ceph Sheepdog ... 
Filesystem A client Folder 7 AuthN <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="40px" height="48px" viewBox="0 0 40 48" enable-background="new 0 0 40 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="655.0938" x2="409.4502" y2="655.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,37.613C8.787,37.613,0,35.738,0,33.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,35.738,30.464,37.613,19.625,37.613z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="649.0938" x2="409.4502" y2="649.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,37.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,35.738,8.787,37.613,19.625,37.613z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="646" x2="408.2217" y2="646" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_3_)" cx="19.625" cy="31.425" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="641.0938" x2="409.4502" y2="641.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.613C8.787,23.613,0,21.738,0,19.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.738,30.464,23.613,19.625,23.613z"/> 
<linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="635.0938" x2="409.4502" y2="635.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,23.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.738,8.787,23.613,19.625,23.613z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="632" x2="408.2217" y2="632" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_6_)" cx="19.625" cy="17.426" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="627.5938" x2="409.4502" y2="627.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_7_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="621.5938" x2="409.4502" y2="621.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_8_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_9_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="618.5" x2="408.2217" y2="618.5" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_9_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new 
" d="M31.291,46.792c0,0-4.313,0.578-7.249,0.694 C20.917,47.613,15,47.613,15,47.613l-2.443-10.279l-0.119-2.283l-1.231-1.842L9.789,23.024l-0.082-0.119L9.3,20.715l-1.45-1.44 L5.329,8.793c0,0,5.296,0.882,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.644l-0.375,1.875 l1.627,2.193L31.291,46.792z"/> </svg> <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="41px" height="48px" viewBox="-0.875 -0.887 41 48" enable-background="new -0.875 -0.887 41 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-979.1445" x2="682.0508" y2="-979.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,36.763C8.787,36.763,0,34.888,0,32.575v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,34.888,30.464,36.763,19.625,36.763z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-973.1445" x2="682.0508" y2="-973.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,36.763c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,34.888,8.787,36.763,19.625,36.763z"/> <path fill="#3C89C9" d="M19.625,26.468c10.16,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.554,5.438 c-12.125,0-18.467-2.484-19.541-4.918C-0.127,29.125,9.465,26.468,19.625,26.468z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-965.6948" x2="682.0508" y2="-965.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_3_)" d="M19.625,23.313C8.787,23.313,0,21.438,0,19.125v10c0,2.313,8.787,4.188,19.625,4.188 
c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.438,30.464,23.313,19.625,23.313z"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-959.6948" x2="682.0508" y2="-959.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.313c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.438,8.787,23.313,19.625,23.313z"/> <path fill="#3C89C9" d="M19.476,13.019c10.161,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.555,5.438 c-12.125,0-18.467-2.485-19.541-4.918C-0.277,15.674,9.316,13.019,19.476,13.019z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-952.4946" x2="682.0508" y2="-952.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-946.4946" x2="682.0508" y2="-946.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_6_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="644.0293" y1="-943.4014" x2="680.8223" y2="-943.4014" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <ellipse fill="url(#SVGID_7_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path 
opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.04,45.982c0,0-4.354,0.664-7.29,0.781 c-3.125,0.125-8.952,0-8.952,0l-2.384-10.292l0.044-2.108l-1.251-1.154L9.789,23.024l-0.082-0.119L9.5,20.529l-1.65-1.254 L5.329,8.793c0,0,4.213,0.903,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.645l-0.521,1.416 l1.46,1.834L31.04,45.982z"/> </svg> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/images_src/glance_db.graphml0000664000175000017500000003010600000000000022511 0ustar00zuulzuul00000000000000 Images id: varchar(36), primary name: varchar(255), nullable size: bigint(20), nullable status: varchar(30) is_public: tinyint(1) created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) disk_format: varchar(20), nullable container_format: varchar(20), nullable checksum: varchar(32), nullable owner: varchar(255), nullable min_disk: int(11) min_ram: int(11) protected: tinyint(1) virtual_size: bigint(20), nullable image_locations id: int(11), primary image_id: varchar(36) value: text created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) meta_data: text, nullable status: varchar(30) image_members id: int(11), primary image_id: varchar(36) member: varchar(255) can_share: tiny_int(1) created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) status: varchar(20) image_properties id: int(11), primary image_id: varchar(36) name: varchar(255) value: text, nullable created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) image_tags id: int(11), primary image_id: varchar(36) value: varchar(255) created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/images_src/glance_layers.graphml0000664000175000017500000005162600000000000023435 0ustar00zuulzuul00000000000000 Domain Router api/v2/router.py REST API api/v2/* Auth api/authorization.py Notifier notifier.py Policy api/policy.py Quota quota/__init__.py Location location.py DB db/__init__.py Registry (optional) registry/v2/* Data Access db/sqlalchemy/api.py A Client Glance Store DBMS Property protection (optional) api/property_protections.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/images_src/image_status_transition.dot0000664000175000017500000000377000000000000024715 0ustar00zuulzuul00000000000000/* # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. */ /* This file can be compiled by graphviz with issuing the following command: dot -Tpng -oimage_status_transition.png image_status_transition.dot See http://www.graphviz.org to get more info. 
*/ digraph { node [shape="doublecircle" color="#006699" style="filled" fillcolor="#33CCFF" fixedsize="True" width="1.5" height="1.5"]; "" -> "queued" [label="create image"]; "queued" -> "active" [label="add location*"]; "queued" -> "saving" [label="upload"]; "queued" -> "uploading" [label="stage upload"]; "queued" -> "deleted" [label="delete"]; "saving" -> "active" [label="upload succeed"]; "saving" -> "killed" [label="[v1] upload fail"]; "saving" -> "queued" [label="[v2] upload fail"]; "saving" -> "deleted" [label="delete"]; "uploading" -> "importing" [label="import"]; "uploading" -> "queued" [label="stage upload fail"]; "uploading" -> "deleted" [label="delete"]; "importing" -> "active" [label="import succeed"]; "importing" -> "queued" [label="import fail"]; "importing" -> "deleted" [label="delete"]; "active" -> "pending_delete" [label="delayed delete"]; "active" -> "deleted" [label="delete"]; "active" -> "deactivated" [label="deactivate"]; "deactivated" -> "active" [label="reactivate"]; "deactivated" -> "deleted" [label="delete"]; "killed" -> "deleted" [label="delete"]; "pending_delete" -> "deleted" [label="after scrub time"]; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/index.rst0000664000175000017500000000774600000000000017002 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================== Welcome to Glance's documentation! ================================== About Glance ============ The Image service (glance) project provides a service where users can upload and discover data assets that are meant to be used with other services. This currently includes *images* and *metadata definitions*. Images ------ Glance image services include discovering, registering, and retrieving virtual machine (VM) images. Glance has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. .. include:: deprecation-note.inc VM images made available through Glance can be stored in a variety of locations from simple filesystems to object-storage systems like the OpenStack Swift project. Metadata Definitions -------------------- Glance hosts a *metadefs* catalog. This provides the OpenStack community with a way to programmatically determine various metadata key names and valid values that can be applied to OpenStack resources. Note that what we're talking about here is simply a *catalog*; the keys and values don't actually do anything unless they are applied to individual OpenStack resources using the APIs or client tools provided by the services responsible for those resources. It's also worth noting that there is no special relationship between the Image Service and the Metadefs Service. If you want to apply the keys and values defined in the Metadefs Service to images, you must use the Image Service API or client tools just as you would for any other OpenStack service. 
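For example, applying a key defined in a metadefs namespace to an image is
done through the regular image client. A minimal sketch (``hw_disk_bus`` is
just an illustrative key, and ``IMAGE_ID`` is a placeholder for a real image
ID in your cloud):

.. code-block:: console

   $ openstack image set --property hw_disk_bus=scsi IMAGE_ID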
Design Principles ----------------- Glance, as with all OpenStack projects, is written with the following design guidelines in mind: * **Component based architecture**: Quickly add new behaviors * **Highly available**: Scale to very serious workloads * **Fault tolerant**: Isolated processes avoid cascading failures * **Recoverable**: Failures should be easy to diagnose, debug, and rectify * **Open standards**: Be a reference implementation for a community-driven api Glance Documentation ==================== The Glance Project Team has put together the following documentation for you. Pick the documents that best match your user profile. .. list-table:: :header-rows: 1 * - User Profile - Links * - | **Contributor** | You want to contribute code, documentation, reviews, or ideas to the Glance Project. - * :doc:`contributor/index` * - | **Administrator** | You want to administer and maintain a Glance installation, including being aware of changes in Glance from release to release. - * :doc:`admin/index` * :doc:`cli/index` * `Glance Release Notes `_ * - | **Operator** | You want to install and configure Glance for your cloud. - * :doc:`install/index` * :doc:`configuration/index` * - | **End User** or **Third-party Developer** | You want to use the Image Service APIs provided by Glance. - * `Image Service API Reference `_ * `Image Service API Guide `_ * :doc:`user/index` .. toctree:: :hidden: contributor/index admin/index cli/index install/index configuration/index user/index ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8262994 glance-29.0.0/doc/source/install/0000775000175000017500000000000000000000000016571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/configure-quotas.rst0000664000175000017500000000260500000000000022621 0ustar00zuulzuul00000000000000* In the ``[oslo_limit]`` section, configure access to keystone: .. code-block:: ini [oslo_limit] auth_url = http://controller:5000 auth_type = password user_domain_id = default username = glance system_scope = all password = GLANCE_PASS endpoint_id = ENDPOINT_ID region_name = RegionOne .. end Replace ``GLANCE_PASS`` with the password you chose for the ``glance`` user in the Identity service. Replace ENDPOINT_ID with the ID of the image endpoint you created earlier (in our case, this would be 340be3625e9b4239a6415d034e98aace), and that you may find by running: .. code-block:: console $ openstack endpoint list --service glance --region RegionOne .. end Make sure that the glance account has reader access to system-scope resources (like limits): .. code-block:: console $ openstack role add --user glance --user-domain Default --system all reader .. end See `the oslo_limit docs `_ for more information about configuring the unified limits client. * In the ``[DEFAULT]`` section, optionally enable per-tenant quotas: .. path /etc/glance/glance.conf .. code-block:: ini [DEFAULT] use_keystone_limits = True .. end Note that you must have created the registered limits as described above if this is enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/edit-glance-api-conf.rst0000664000175000017500000000272300000000000023175 0ustar00zuulzuul00000000000000* In the ``[database]`` section, configure database access: .. path /etc/glance/glance.conf .. code-block:: ini [database] # ... 
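# The value below is a SQLAlchemy connection URL of the form
# dialect+driver://USER:PASSWORD@HOST/DBNAME; this example assumes the
# 'glance' database and user created in the prerequisites: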
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance .. end Replace ``GLANCE_DBPASS`` with the password you chose for the Image service database. * In the ``[keystone_authtoken]`` and ``[paste_deploy]`` sections, configure Identity service access: .. path /etc/glance/glance.conf .. code-block:: ini [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = glance password = GLANCE_PASS [paste_deploy] # ... flavor = keystone .. end Replace ``GLANCE_PASS`` with the password you chose for the ``glance`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[glance_store]`` section, configure the local file system store and location of image files: .. path /etc/glance/glance.conf .. code-block:: ini [DEFAULT] # ... enabled_backends=fs:file [glance_store] # ... default_backend = fs [fs] filesystem_store_datadir = /var/lib/glance/images/ .. end .. include:: configure-quotas.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/get-started.rst0000664000175000017500000000644000000000000021552 0ustar00zuulzuul00000000000000====================== Image service overview ====================== The Image service (glance) enables users to discover, register, and retrieve virtual machine images. It offers a REST API that enables you to query virtual machine image metadata and retrieve an actual image. You can store virtual machine images made available through the Image service in a variety of locations, from simple file systems to object-storage systems like OpenStack Object Storage. .. important:: For simplicity, this guide describes configuring the Image service to use the ``file`` back end, which uploads and stores in a directory on the controller node hosting the Image service. By default, this directory is ``/var/lib/glance/images/``. Before you proceed, ensure that the controller node has at least several gigabytes of space available in this directory. Keep in mind that since the ``file`` back end is often local to a controller node, it is not typically suitable for a multi-node glance deployment. For information on requirements for other back ends, see `Configuration Reference <../configuration/index.html>`__. The OpenStack Image service is central to Infrastructure-as-a-Service (IaaS). It accepts API requests for disk or server images, and metadata definitions from end users or OpenStack Compute components. It also supports the storage of disk or server images on various repository types, including OpenStack Object Storage. A number of periodic processes run on the OpenStack Image service to support caching. Replication services ensure consistency and availability through the cluster. Other periodic processes include auditors, updaters, and reapers. The OpenStack Image service includes the following components: glance-api Accepts Image API calls for image discovery, retrieval, and storage. .. note:: An OpenStack Community Goal in the Pike release was `Control Plane API endpoints deployment via WSGI`_. As currently constituted, however, glance-api is **not suitable** to be run in such a configuration. Instead we recommend that Glance be run in the traditional manner as a standalone server. 
See the "Known Issues" section of the `Glance Release Notes`_ for the Pike and Queens releases for more information. .. _`Control Plane API endpoints deployment via WSGI`: https://governance.openstack.org/tc/goals/pike/deploy-api-in-wsgi.html .. _`Glance Release Notes`: https://docs.openstack.org/releasenotes/glance/index.html Database Stores image metadata and you can choose your database depending on your preference. Most deployments use MySQL or SQLite. Storage repository for image files Various repository types are supported including normal file systems (or any filesystem mounted on the glance-api controller node), Object Storage, RADOS block devices, VMware datastore, and HTTP. Note that some repositories will only support read-only usage. Metadata definition service A common API for vendors, admins, services, and users to meaningfully define their own custom metadata. This metadata can be used on different types of resources like images, artifacts, volumes, flavors, and aggregates. A definition includes the new property's key, description, constraints, and the resource types which it can be associated with. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/index.rst0000664000175000017500000000140600000000000020433 0ustar00zuulzuul00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================== Glance Installation =================== .. toctree:: get-started install.rst verify.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/install-debian.rst0000664000175000017500000001503600000000000022216 0ustar00zuulzuul00000000000000Install and configure (Debian) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Image service, code-named glance, on the controller node. For simplicity, this configuration stores images on the local file system. Prerequisites ------------- Before you install and configure the Image service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end * Create the ``glance`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE glance; .. end * Grant proper access to the ``glance`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ IDENTIFIED BY 'GLANCE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ IDENTIFIED BY 'GLANCE_DBPASS'; .. end Replace ``GLANCE_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. 
To create the service credentials, complete these steps: * Create the ``glance`` user: .. code-block:: console $ openstack user create --domain default --password-prompt glance User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 3f4e777c4062483ab8d9edd7dff829df | | name | glance | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Add the ``admin`` role to the ``glance`` user and ``service`` project: .. code-block:: console $ openstack role add --project service --user glance admin .. end .. note:: This command provides no output. * Create the ``glance`` service entity: .. code-block:: console $ openstack service create --name glance \ --description "OpenStack Image" image +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Image | | enabled | True | | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | name | glance | | type | image | +-------------+----------------------------------+ .. end #. Create the Image service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ image public http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image internal http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image admin http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0c37ed58103f4300a84ff125a539032d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ .. end #. Register quota limits (optional): .. include:: register-quotas.rst Install and configure components -------------------------------- .. include:: note_configuration_vary_by_distribution.txt #. Install the packages: .. code-block:: console # apt install glance .. end #. Edit the ``/etc/glance/glance-api.conf`` file and complete the following actions: .. include:: edit-glance-api-conf.rst 3. Populate the Image service database: .. code-block:: console # su -s /bin/sh -c "glance-manage db_sync" glance .. end .. note:: Ignore any deprecation messages in this output. Finalize installation --------------------- #. Restart the Image services: .. 
code-block:: console # service glance-api restart .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/install-obs.rst0000664000175000017500000001560700000000000021563 0ustar00zuulzuul00000000000000Install and configure (SUSE) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Image service, code-named glance, on the controller node. For simplicity, this configuration stores images on the local file system. Prerequisites ------------- Before you install and configure the Image service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end * Create the ``glance`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE glance; .. end * Grant proper access to the ``glance`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ IDENTIFIED BY 'GLANCE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ IDENTIFIED BY 'GLANCE_DBPASS'; .. end Replace ``GLANCE_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. To create the service credentials, complete these steps: * Create the ``glance`` user: .. code-block:: console $ openstack user create --domain default --password-prompt glance User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 3f4e777c4062483ab8d9edd7dff829df | | name | glance | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Add the ``admin`` role to the ``glance`` user and ``service`` project: .. code-block:: console $ openstack role add --project service --user glance admin .. end .. note:: This command provides no output. * Create the ``glance`` service entity: .. code-block:: console $ openstack service create --name glance \ --description "OpenStack Image" image +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Image | | enabled | True | | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | name | glance | | type | image | +-------------+----------------------------------+ .. end #. Create the Image service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ image public http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image internal http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image admin http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0c37ed58103f4300a84ff125a539032d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ .. end #. Register quota limits (optional): .. include:: register-quotas.rst Install and configure components -------------------------------- .. include:: note_configuration_vary_by_distribution.txt .. note:: Starting with the Newton release, SUSE OpenStack packages are shipping with the upstream default configuration files. For example ``/etc/glance/glance-api.conf``, with customizations in ``/etc/glance/glance-api.conf.d/``. While the following instructions modify the default configuration files, adding new files in ``/etc/glance/glance-api.conf.d`` achieves the same result. #. Install the packages: .. code-block:: console # zypper install openstack-glance \ openstack-glance-api .. end #. Edit the ``/etc/glance/glance-api.conf`` file and complete the following actions: .. include:: edit-glance-api-conf.rst Finalize installation --------------------- * Start the Image services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-glance-api.service # systemctl start openstack-glance-api.service .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/install-rdo.rst0000664000175000017500000001523600000000000021562 0ustar00zuulzuul00000000000000Install and configure (Red Hat) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Image service, code-named glance, on the controller node. For simplicity, this configuration stores images on the local file system. Prerequisites ------------- Before you install and configure the Image service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end * Create the ``glance`` database: .. 
code-block:: console MariaDB [(none)]> CREATE DATABASE glance; .. end * Grant proper access to the ``glance`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ IDENTIFIED BY 'GLANCE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ IDENTIFIED BY 'GLANCE_DBPASS'; .. end Replace ``GLANCE_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. To create the service credentials, complete these steps: * Create the ``glance`` user: .. code-block:: console $ openstack user create --domain default --password-prompt glance User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 3f4e777c4062483ab8d9edd7dff829df | | name | glance | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Add the ``admin`` role to the ``glance`` user and ``service`` project: .. code-block:: console $ openstack role add --project service --user glance admin .. end .. note:: This command provides no output. * Create the ``glance`` service entity: .. code-block:: console $ openstack service create --name glance \ --description "OpenStack Image" image +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Image | | enabled | True | | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | name | glance | | type | image | +-------------+----------------------------------+ .. end #. Create the Image service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ image public http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image internal http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image admin http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0c37ed58103f4300a84ff125a539032d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ .. end #. Register quota limits (optional): .. 
include:: register-quotas.rst Install and configure components -------------------------------- .. include:: note_configuration_vary_by_distribution.txt #. Install the packages: .. code-block:: console # yum install openstack-glance .. end #. Edit the ``/etc/glance/glance-api.conf`` file and complete the following actions: .. include:: edit-glance-api-conf.rst 3. Populate the Image service database: .. code-block:: console # su -s /bin/sh -c "glance-manage db_sync" glance .. end .. note:: Ignore any deprecation messages in this output. Finalize installation --------------------- * Start the Image services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-glance-api.service # systemctl start openstack-glance-api.service .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/install-ubuntu.rst0000664000175000017500000001502300000000000022312 0ustar00zuulzuul00000000000000Install and configure (Ubuntu) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Image service, code-named glance, on the controller node. For simplicity, this configuration stores images on the local file system. Prerequisites ------------- Before you install and configure the Image service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console # mysql .. end * Create the ``glance`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE glance; .. end * Grant proper access to the ``glance`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \ IDENTIFIED BY 'GLANCE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \ IDENTIFIED BY 'GLANCE_DBPASS'; .. end Replace ``GLANCE_DBPASS`` with a suitable password. * Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. To create the service credentials, complete these steps: * Create the ``glance`` user: .. code-block:: console $ openstack user create --domain default --password-prompt glance User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 3f4e777c4062483ab8d9edd7dff829df | | name | glance | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Add the ``admin`` role to the ``glance`` user and ``service`` project: .. code-block:: console $ openstack role add --project service --user glance admin .. end .. note:: This command provides no output. * Create the ``glance`` service entity: .. code-block:: console $ openstack service create --name glance \ --description "OpenStack Image" image +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Image | | enabled | True | | id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | name | glance | | type | image | +-------------+----------------------------------+ .. end #. Create the Image service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ image public http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 340be3625e9b4239a6415d034e98aace | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image internal http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | a6e4b153c2ae4c919eccfdbb7dceb5d2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ image admin http://controller:9292 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0c37ed58103f4300a84ff125a539032d | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 8c2c7f1b9b5049ea9e63757b5533e6d2 | | service_name | glance | | service_type | image | | url | http://controller:9292 | +--------------+----------------------------------+ .. end #. Register quota limits (optional): .. include:: register-quotas.rst Install and configure components -------------------------------- .. include:: note_configuration_vary_by_distribution.txt #. Install the packages: .. code-block:: console # apt install glance .. end #. Edit the ``/etc/glance/glance-api.conf`` file and complete the following actions: .. include:: edit-glance-api-conf.rst 3. Populate the Image service database: .. code-block:: console # su -s /bin/sh -c "glance-manage db_sync" glance .. end .. note:: Ignore any deprecation messages in this output. Finalize installation --------------------- #. Restart the Image services: .. code-block:: console # service glance-api restart .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/install.rst0000664000175000017500000000042000000000000020765 0ustar00zuulzuul00000000000000Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Image service, code-named glance, on the controller node. For simplicity, this configuration stores images on the local file system. .. toctree:: :glob: install-* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/note_configuration_vary_by_distribution.txt0000664000175000017500000000046600000000000027566 0ustar00zuulzuul00000000000000.. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. 
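For example, after applying the edits described in ``edit-glance-api-conf.rst``, the modified sections of ``/etc/glance/glance-api.conf`` typically end up looking like the following sketch. Here ``controller``, ``GLANCE_DBPASS``, and ``GLANCE_PASS`` are placeholders for your deployment's controller host name and the database and service passwords chosen earlier:

.. code-block:: ini

   [database]
   # Access to the glance database created in the prerequisites.
   connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

   [keystone_authtoken]
   # Identity service access for the glance service user.
   www_authenticate_uri = http://controller:5000
   auth_url = http://controller:5000
   memcached_servers = controller:11211
   auth_type = password
   project_domain_name = Default
   user_domain_name = Default
   project_name = service
   username = glance
   password = GLANCE_PASS

   [paste_deploy]
   flavor = keystone

   [glance_store]
   # Store images on the local file system, as assumed by this guide.
   stores = file,http
   default_store = file
   filesystem_store_datadir = /var/lib/glance/images/

.. end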
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/register-quotas.rst0000664000175000017500000000601700000000000022465 0ustar00zuulzuul00000000000000If you decide to use per-tenant quotas in Glance, you must register the limits in Keystone first: .. code-block:: console $ openstack --os-cloud devstack-system-admin registered limit create \ --service glance --default-limit 1000 --region RegionOne image_size_total +---------------+----------------------------------+ | Field | Value | +---------------+----------------------------------+ | default_limit | 1000 | | description | None | | id | 9cedfc5de80345a9b13ed00c2b5460f2 | | region_id | RegionOne | | resource_name | image_size_total | | service_id | e38c84a2487f49fd9864193bdc8a3174 | +---------------+----------------------------------+ $ openstack --os-cloud devstack-system-admin registered limit create \ --service glance --default-limit 1000 --region RegionOne image_stage_total +---------------+----------------------------------+ | Field | Value | +---------------+----------------------------------+ | default_limit | 1000 | | description | None | | id | 5a68712b6ba6496d823d0c66e5e860b9 | | region_id | RegionOne | | resource_name | image_stage_total | | service_id | e38c84a2487f49fd9864193bdc8a3174 | +---------------+----------------------------------+ $ openstack --os-cloud devstack-system-admin registered limit create \ --service glance --default-limit 100 --region RegionOne image_count_total +---------------+----------------------------------+ | Field | Value | +---------------+----------------------------------+ | default_limit | 100 | | description | None | | id | beb91b043296499f8e6268f29d8b2749 | | region_id | RegionOne | | resource_name | image_count_total | | service_id | e38c84a2487f49fd9864193bdc8a3174 | +---------------+----------------------------------+ $ openstack --os-cloud devstack-system-admin registered limit create \ --service glance --default-limit 100 --region RegionOne \ image_count_uploading +---------------+----------------------------------+ | Field | Value | +---------------+----------------------------------+ | default_limit | 100 | | description | None | | id | fc29649c047a45bf9bc03ec4a7bcb8af | | region_id | RegionOne | | resource_name | image_count_uploading | | service_id | e38c84a2487f49fd9864193bdc8a3174 | +---------------+----------------------------------+ .. end Be sure to also set ``use_keystone_limits=True`` in your ``glance-api.conf`` file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/install/verify.rst0000664000175000017500000001007700000000000020634 0ustar00zuulzuul00000000000000Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Image service using `CirrOS `__, a small Linux image that helps you test your OpenStack deployment. For more information about how to download and build images, see `OpenStack Virtual Machine Image Guide `__. For information about how to manage images, see the `OpenStack End User Guide `__. .. note:: Perform these commands on the controller node. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. Download the source image: .. code-block:: console $ wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img .. end .. note:: Install ``wget`` if your distribution does not include it. #. 
Upload the image to the Image service using the QCOW2 (QEMU Copy On Write 2) disk format, bare container format, and public visibility so all projects can access it: .. code-block:: console $ glance image-create --name "cirros" \ --file cirros-0.4.0-x86_64-disk.img \ --disk-format qcow2 --container-format bare \ --visibility=public +------------------+------------------------------------------------------+ | Field | Value | +------------------+------------------------------------------------------+ | checksum | 133eae9fb1c98f45894a4e60d8736619 | | container_format | bare | | created_at | 2015-03-26T16:52:10Z | | disk_format | qcow2 | | file | /v2/images/cc5c6982-4910-471e-b864-1098015901b5/file | | id | cc5c6982-4910-471e-b864-1098015901b5 | | min_disk | 0 | | min_ram | 0 | | name | cirros | | owner | ae7a98326b9c455588edd2656d723b9d | | protected | False | | schema | /v2/schemas/image | | size | 13200896 | | status | active | | tags | | | updated_at | 2015-03-26T16:52:10Z | | virtual_size | None | | visibility | public | +------------------+------------------------------------------------------+ .. end For information about the :command:`glance` parameters, see `Image service (glance) command-line client `__ in the ``OpenStack User Guide``. For information about disk and container formats for images, see `Disk and container formats for images `__ in the ``OpenStack Virtual Machine Image Guide``. .. note:: OpenStack generates IDs dynamically, so you will see different values in the example command output. #. Confirm upload of the image and validate attributes: .. code-block:: console $ glance image-list +--------------------------------------+--------+--------+ | ID | Name | Status | +--------------------------------------+--------+--------+ | 38047887-61a7-41ea-9b49-27987d5e8bb9 | cirros | active | +--------------------------------------+--------+--------+ .. end ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8302999 glance-29.0.0/doc/source/user/0000775000175000017500000000000000000000000016101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/common-image-properties.rst0000664000175000017500000000666700000000000023414 0ustar00zuulzuul00000000000000.. Copyright 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Common Image Properties ======================= When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image. This document explains the names of these properties and the expected values. The common image properties are also described in a JSON schema, found in ``/etc/glance/schema-image.json`` in the Glance source code. kernel_id The ID of image stored in Glance that should be used as the kernel when booting an AMI-style image. ramdisk_id The ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image. 
instance_uuid Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.) architecture Operating system architecture as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html os_distro The common name of the operating system distribution as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html os_version The operating system version as specified by the distributor. description A brief human-readable string, suitable for display in a user interface, describing the image. cinder_encryption_key_id Identifier in the OpenStack Key Management Service for the encryption key for the Block Storage Service to use when mounting a volume created from this image. cinder_encryption_key_deletion_policy States the condition under which the Image Service will delete the object associated with the 'cinder_encryption_key_id' image property. If this property is missing, the Image Service will take no action. This file is the default schema. An operator can modify ``/etc/glance/schema-image.json`` to include arbitrary properties. .. warning:: * Do not delete existing properties from this default schema, because doing so will affect interoperability. * The ``type`` of each property in this JSON schema, specified by the ``type`` key, must have the value ``string`` even if the property you are adding is not conceptually a string. For example, even if you add a property named ``is_removable`` whose value is conceptually a boolean, you must still give the ``type`` key the value ``string``. Otherwise, when an end-user makes a call that sets a value on one of these properties, they will get a 500. This is because everything in the image_properties table must be a string in the database. The API, however, won't accept a string value when the schema says the property is a boolean or some other non-string JSON data type. .. note:: If your need is more complicated, we recommend using metadefs_ instead of modifying this image schema. .. _metadefs: https://docs.openstack.org/api-ref/image/v2/metadefs-index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/formats.rst0000664000175000017500000001405100000000000020307 0ustar00zuulzuul00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _formats: Disk and Container Formats ========================== When adding an image to Glance, you must specify what the virtual machine image's *disk format* and *container format* are. Disk and container formats are configurable on a per-deployment basis. This document intends to establish a global convention for what specific values of *disk_format* and *container_format* mean. Disk Format ----------- The disk format of a virtual machine image is the format of the underlying disk image.
Virtual appliance vendors have different formats for laying out the information contained in a virtual machine disk image. You can set your image's disk format to one of the following: raw This is an unstructured disk image format. The ``raw`` image format is the simplest one, and is natively supported by both KVM and Xen hypervisors. You can think of a raw image as being the bit-equivalent of a block device file, created as if somebody had copied, say, ``/dev/sda`` to a file using the :command:`dd` command. vhd This is the VHD (Virtual Hard Disk) disk format, a common disk format used by virtual machine monitors from VMware, Xen, Microsoft, VirtualBox, and others. vhdx This is the `VHDX `_ format, an enhanced version of the ``vhd`` format. It has support for larger disk sizes and protection against data corruption during power failures. vmdk The `VMDK `_ (Virtual Machine Disk) format is supported by many common virtual machine monitors, for example the VMware ESXi hypervisor. vdi The `VDI `_ (Virtual Disk Image) format for image files is supported by the VirtualBox virtual machine monitor and the QEMU emulator. iso The `ISO `_ format is a disk image formatted with the read-only ISO 9660 (also known as ECMA-119) filesystem commonly used for CDs and DVDs. ploop A disk format supported and used by Virtuozzo to run OS Containers. qcow2 The `QCOW2 `_ (QEMU copy-on-write version 2) format is commonly used with the KVM hypervisor. It uses a sparse representation, so the image size is smaller than a raw format file of the same virtual disk. It can expand dynamically and supports Copy on Write. The `AKI/AMI/ARI `_ format was the initial image format supported by Amazon EC2. The image consists of three files, each of which has its own specific ``disk_format`` identifier: aki This indicates what is stored in Glance is an Amazon Kernel Image (AKI). It is a kernel file that the hypervisor will load initially to boot the image. For a Linux machine, this would be a ``vmlinuz`` file. ari This indicates what is stored in Glance is an Amazon Ramdisk Image (ARI). It is an optional ramdisk file mounted at boot time. For a Linux machine, this would be an ``initrd`` file. ami This indicates what is stored in Glance is an Amazon Machine Image (AMI). It is a virtual machine image in raw format. Container Format ---------------- The container format refers to whether the virtual machine image is in a file format that also contains metadata about the actual virtual machine. Note the following: 1. Glance does not verify that the ``container_format`` image property accurately describes the image data payload. 2. Do not assume that all OpenStack services can handle all the container formats defined by Glance. Consult the documentation for the service consuming your image to see what container formats the service supports. You can set your image's container format to one of the following: bare This indicates there is no container or metadata envelope for the image. ovf `OVF `_ (Open Virtualization Format) is a packaging format for virtual machines, defined by the Distributed Management Task Force (DMTF) standards group. An OVF package contains one or more image files, a ``.ovf`` XML metadata file that contains information about the virtual machine, and possibly other files as well. An OVF package can be distributed in different ways. For example, it could be distributed as a set of discrete files, or as a tar archive file with an ``.ova`` (open virtual appliance/application) extension. 
aki This indicates what is stored in Glance is an Amazon kernel image. ari This indicates what is stored in Glance is an Amazon ramdisk image. ami This indicates what is stored in Glance is an Amazon machine image. ova This indicates what is stored in Glance is an OVA tar archive file, that is, an OVF package contained in a single tar archive file. docker This indicates what is stored in Glance is a Docker tar archive of the container filesystem. compressed The exact format of the compressed file is not specified. It is the responsibility of the consuming service to analyze the data payload and determine the specific compression format. A particular OpenStack service may only support specific formats. You may assume that any OpenStack service that creates an image with a 'compressed' container format will be able to consume that image. Consult the documentation for the service that will consume your image for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/glanceapi.rst0000664000175000017500000010365500000000000020570 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Using Glance's Image Public APIs ================================ Glance is the reference implementation of the OpenStack Images API. As such, Glance fully implements versions 1 and 2 of the Images API. .. include:: ../deprecation-note.inc There used to be a sentence here saying, "The Images API specification is developed alongside Glance, but is not considered part of the Glance project." That's only partially true (or completely false, depending upon how strict you are about these things). Conceptually, the OpenStack Images API is an independent definition of a REST API. In practice, however, the only way to participate in the evolution of the Images API is to work with the Glance community to define the new functionality and provide its reference implementation. Further, Glance falls under the "designated sections" provision of the OpenStack Interop (formerly known as DefCore) Guidelines, which basically means that in order to qualify as "OpenStack", a cloud exposing an OpenStack Images API must include the Glance Images API implementation code. Thus, although conceptually independent, the OpenStack Images APIs are intimately associated with Glance. **References** * `Designated sections (definition) `_ * `2014-04-02 DefCore Designated Sections Guidelines `_ * `OpenStack Core Definition `_ * `Interop Guidelines Repository `_ Glance and the Images APIs: Past, Present, and Future ----------------------------------------------------- Here's a quick summary of the Images APIs that have been implemented by Glance. If you're interested in more details, you can consult the Release Notes for all the OpenStack releases (beginning with "Bexar") to follow the evolution of features in Glance and the Images APIs. 
Images v1 API ************* The v1 API was originally designed as a service API for use by Nova and other OpenStack services. In the Kilo release, the v1.1 API was downgraded from CURRENT to SUPPORTED. In the Newton release, the version 1 API was officially declared DEPRECATED. During the deprecation period, the Images v1 API is closed to further development. The Glance code implementing the v1 API accepts only serious bugfixes. Since Folsom, it has been possible to deploy OpenStack without exposing the Images v1 API to end users. The Compute v2 API contains image-related API calls allowing users to list images, list image details, show image details for a specific image, delete images, and manipulate image metadata. Nova acts as a proxy to Glance for these image-related calls. It's important to note that the image-related calls in the Compute v2 API are a proper subset of the calls available in the Images APIs. In the Newton release, Nova (and other OpenStack services that consume images) has been modified to use the Images v2 API by default. **Reference** * `OpenStack Standard Deprecation Requirements `_ Images v2 API ************* The v2 API is the CURRENT OpenStack Images API. It provides a friendlier interface to consumers than the v1 API did, as it was specifically designed to expose image-related functionality as a public-facing endpoint. It's the version that's currently open to development. A common strategy is to deploy multiple Glance nodes: internal-facing nodes providing the Images APIs for internal consumers like Nova, and external-facing nodes providing the Images v2 API for public use. The Future ********** During the long and tumultuous design phase of what has since become an independent service named "Glare" (the Glance Artifacts Repository), the Glance community loosely spoke about the Artifacts API being "Glance v3". This, however, was only a shorthand way of speaking of the Artifacts effort. The Artifacts API can't be the Images v3 API since Artifacts are not the same as Images. Conceptually, a virtual machine image could be an Artifact, and the Glare code has been designed to be compatible with the Images v2 API. But at this time, there are no plans to implement an Images v3 API. During the Newton development cycle, Glare became an independent OpenStack project. While it's evident that there's a need for an Artifact Repository in OpenStack, whether it will be as ubiquitous as the need for an Images Repository isn't clear. On the other hand, industry trends could go in the opposite direction where everyone needs Artifacts and deployers view images as simply another type of digital artifact. As Yogi Berra, an experienced manager, once said, "It's tough to make predictions, especially about the future." Authentication -------------- Glance depends on Keystone and the OpenStack Identity API to handle authentication of clients. You must obtain an authentication token from Keystone and send it along with all API requests to Glance through the ``X-Auth-Token`` header. Glance will communicate back to Keystone to verify the token validity and obtain your identity credentials. See :ref:`authentication` for more information on integrating with Keystone. Using v1.X ---------- .. include:: ../deprecation-note.inc For the purpose of examples, assume there is a Glance API server running at the URL ``http://glance.openstack.example.org`` on the default port 80.
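All of the v1 examples that follow can be exercised with a plain HTTP client. As a minimal sketch, assuming a valid Keystone token has already been obtained and stored in the ``OS_TOKEN`` environment variable, the list request described next looks like this with curl::

    $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
        http://glance.openstack.example.org/v1/images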
List Available Images ********************* We want to see a list of available images that the authenticated user has access to. This includes images owned by the user, images shared with the user and public images. We issue a ``GET`` request to ``http://glance.openstack.example.org/v1/images`` to retrieve this list of available images. The data is returned as a JSON-encoded mapping in the following format:: {'images': [ {'uri': 'http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', 'name': 'Ubuntu 10.04 Plain', 'disk_format': 'vhd', 'container_format': 'ovf', 'size': '5368709120'} ...]} List Available Images in More Detail ************************************ We want to see a more detailed list of available images that the authenticated user has access to. This includes images owned by the user, images shared with the user and public images. We issue a ``GET`` request to ``http://glance.openstack.example.org/v1/images/detail`` to retrieve this list of available images. The data is returned as a JSON-encoded mapping in the following format:: {'images': [ {'uri': 'http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', 'name': 'Ubuntu 10.04 Plain 5GB', 'disk_format': 'vhd', 'container_format': 'ovf', 'size': '5368709120', 'checksum': 'c2e5db72bd7fd153f53ede5da5a06de3', 'created_at': '2010-02-03 09:34:01', 'updated_at': '2010-02-03 09:34:01', 'deleted_at': '', 'status': 'active', 'is_public': true, 'min_ram': 256, 'min_disk': 5, 'owner': null, 'properties': {'distro': 'Ubuntu 10.04 LTS'}}, ...]} .. note:: All timestamps returned are in UTC. The `updated_at` timestamp is the timestamp when an image's metadata was last updated, not its image data, as all image data is immutable once stored in Glance. The `properties` field is a mapping of free-form key/value pairs that have been saved with the image metadata. The `checksum` field is an MD5 checksum of the image file data. The `is_public` field is a boolean indicating whether the image is publicly available. The `min_ram` field is an integer specifying the minimum amount of RAM needed to run this image on an instance, in megabytes. The `min_disk` field is an integer specifying the minimum amount of disk space needed to run this image on an instance, in gigabytes. The `owner` field is a string which may either be null or which will indicate the owner of the image. Filtering Images Lists ********************** Both the ``GET /v1/images`` and ``GET /v1/images/detail`` requests take query parameters that serve to filter the returned list of images. The following list details these query parameters. * ``name=NAME`` Filters images having a ``name`` attribute matching ``NAME``. * ``container_format=FORMAT`` Filters images having a ``container_format`` attribute matching ``FORMAT`` For more information, see :ref:`formats` * ``disk_format=FORMAT`` Filters images having a ``disk_format`` attribute matching ``FORMAT`` For more information, see :ref:`formats` * ``status=STATUS`` Filters images having a ``status`` attribute matching ``STATUS`` For more information, see :ref:`image-statuses` * ``size_min=BYTES`` Filters images having a ``size`` attribute greater than or equal to ``BYTES`` * ``size_max=BYTES`` Filters images having a ``size`` attribute less than or equal to ``BYTES`` These two resources also accept additional query parameters: * ``sort_key=KEY`` Results will be ordered by the specified image attribute ``KEY``. 
Accepted values include ``id``, ``name``, ``status``, ``disk_format``, ``container_format``, ``size``, ``created_at`` (default) and ``updated_at``. * ``sort_dir=DIR`` Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` for ascending or ``desc`` (default) for descending. * ``marker=ID`` An image identifier marker may be specified. When present, only images which occur after the identifier ``ID`` will be listed. (These are the images that have a `sort_key` later than that of the marker ``ID`` in the `sort_dir` direction.) * ``limit=LIMIT`` When present, the maximum number of results returned will not exceed ``LIMIT``. .. note:: If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) then the number of results returned may be less than ``LIMIT``. * ``is_public=PUBLIC`` An admin user may use the `is_public` parameter to control which results are returned. When the `is_public` parameter is absent or set to `True` the following images will be listed: Images whose `is_public` field is `True`, owned images and shared images. When the `is_public` parameter is set to `False` the following images will be listed: Images (owned, shared, or non-owned) whose `is_public` field is `False`. When the `is_public` parameter is set to `None` all images will be listed irrespective of owner, shared status or the `is_public` field. .. note:: Use of the `is_public` parameter is restricted to admin users. For all other users it will be ignored. Retrieve Image Metadata *********************** We want to see detailed information for a specific virtual machine image that the Glance server knows about. We have queried the Glance server for a list of images and the data returned includes the `uri` field for each available image. This `uri` field value contains the exact location needed to get the metadata for a specific image. Continuing the example from above, in order to get metadata about the first image returned, we can issue a ``HEAD`` request to the Glance server for the image's URI. We issue a ``HEAD`` request to ``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to retrieve complete metadata for that image. The metadata is returned as a set of HTTP headers that begin with the prefix ``x-image-meta-``. The following shows an example of the HTTP headers returned from the above ``HEAD`` request:: x-image-meta-uri http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 x-image-meta-name Ubuntu 10.04 Plain 5GB x-image-meta-disk_format vhd x-image-meta-container_format ovf x-image-meta-size 5368709120 x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 x-image-meta-created_at 2010-02-03 09:34:01 x-image-meta-updated_at 2010-02-03 09:34:01 x-image-meta-deleted_at x-image-meta-status available x-image-meta-is_public true x-image-meta-min_ram 256 x-image-meta-min_disk 0 x-image-meta-owner null x-image-meta-property-distro Ubuntu 10.04 LTS .. note:: All timestamps returned are in UTC. The `x-image-meta-updated_at` timestamp is the timestamp when an image's metadata was last updated, not its image data, as all image data is immutable once stored in Glance. There may be multiple headers that begin with the prefix `x-image-meta-property-`. These headers are free-form key/value pairs that have been saved with the image metadata. The key is the string after `x-image-meta-property-` and the value is the value of the header. The response's `ETag` header will always be equal to the `x-image-meta-checksum` value. 
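Such a ``HEAD`` request is easy to reproduce with curl, whose ``-I`` option issues a ``HEAD`` request and prints the response headers (a sketch, again assuming a token in ``OS_TOKEN``)::

    $ curl -sI -H "X-Auth-Token: $OS_TOKEN" \
        http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9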
The response's `x-image-meta-is_public` value is a boolean indicating whether the image is publicly available. The response's `x-image-meta-owner` value is a string which may either be null or which will indicate the owner of the image. Retrieve Raw Image Data *********************** We want to retrieve the actual raw data for a specific virtual machine image that the Glance server knows about. We have queried the Glance server for a list of images and the data returned includes the `uri` field for each available image. This `uri` field value contains the exact location of a specific image. Continuing the example from above, in order to get both the metadata and the raw data for the first image returned, we issue a ``GET`` request to ``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9``. This retrieves the metadata for that image as well as the image itself, encoded into the response body. The metadata is returned as a set of HTTP headers that begin with the prefix ``x-image-meta-``. The following shows an example of the HTTP headers returned from the above ``GET`` request:: x-image-meta-uri http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 x-image-meta-name Ubuntu 10.04 Plain 5GB x-image-meta-disk_format vhd x-image-meta-container_format ovf x-image-meta-size 5368709120 x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 x-image-meta-created_at 2010-02-03 09:34:01 x-image-meta-updated_at 2010-02-03 09:34:01 x-image-meta-deleted_at x-image-meta-status available x-image-meta-is_public true x-image-meta-min_ram 256 x-image-meta-min_disk 5 x-image-meta-owner null x-image-meta-property-distro Ubuntu 10.04 LTS .. note:: All timestamps returned are in UTC. The `x-image-meta-updated_at` timestamp is the timestamp when an image's metadata was last updated, not its image data, as all image data is immutable once stored in Glance. There may be multiple headers that begin with the prefix `x-image-meta-property-`. These headers are free-form key/value pairs that have been saved with the image metadata. The key is the string after `x-image-meta-property-` and the value is the value of the header. The response's `Content-Length` header shall be equal to the value of the `x-image-meta-size` header. The response's `ETag` header will always be equal to the `x-image-meta-checksum` value. The response's `x-image-meta-is_public` value is a boolean indicating whether the image is publicly available. The response's `x-image-meta-owner` value is a string which may either be null or which will indicate the owner of the image. The image data itself will be the body of the HTTP response returned from the request, which will have a content-type of `application/octet-stream`. Add a New Image *************** We have created a new virtual machine image in some way (created a "golden image" or snapshotted/backed up an existing image) and we wish to do two things: * Store the disk image data in Glance * Store metadata about this image in Glance We can do the above two activities in a single call to the Glance API. Assuming, as in the examples above, that a Glance API server is running at ``http://glance.openstack.example.org``, we issue a ``POST`` request to add an image to Glance:: POST http://glance.openstack.example.org/v1/images The metadata about the image is sent to Glance in HTTP headers. The body of the HTTP request to the Glance API will be the MIME-encoded disk image data.
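Putting this together, a single-call image creation can be sketched with curl as follows; the metadata values and the local file name are illustrative only, and the ``Content-Type`` header marks the body as raw octets::

    $ curl -s -X POST \
        -H "X-Auth-Token: $OS_TOKEN" \
        -H "x-image-meta-name: Ubuntu 10.04 Plain 5GB" \
        -H "x-image-meta-disk_format: vhd" \
        -H "x-image-meta-container_format: ovf" \
        -H "Content-Type: application/octet-stream" \
        --data-binary @ubuntu-10.04.vhd \
        http://glance.openstack.example.org/v1/images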
Reserve a New Image ******************* We can also perform the activities described in `Add a New Image`_ using two separate calls to the Image API; the first to register the image metadata, and the second to add the image disk data. This is known as "reserving" an image. The first call should be a ``POST`` to ``http://glance.openstack.example.org/v1/images``, which will result in a new image id being registered with a status of ``queued``:: {'image': {'status': 'queued', 'id': '71c675ab-d94f-49cd-a114-e12490b328d9', ...} ...} The image data can then be added using a ``PUT`` to ``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9``. The image status will then be set to ``active`` by Glance. **Image Metadata in HTTP Headers** Glance will view as image metadata any HTTP header that it receives in a ``POST`` request where the header key is prefixed with the strings ``x-image-meta-`` and ``x-image-meta-property-``. The list of metadata headers that Glance accepts are listed below. * ``x-image-meta-name`` This header is required, unless reserving an image. Its value should be the name of the image. Note that the name of an image *is not unique to a Glance node*. It would be an unrealistic expectation of users to know all the unique names of all other user's images. * ``x-image-meta-id`` This header is optional. When present, Glance will use the supplied identifier for the image. If the identifier already exists in that Glance node, then a **409 Conflict** will be returned by Glance. The value of the header must be a uuid in hexadecimal string notation (that is 71c675ab-d94f-49cd-a114-e12490b328d9). When this header is *not* present, Glance will generate an identifier for the image and return this identifier in the response (see below). * ``x-image-meta-store`` This header is optional. Valid values are one of ``file``, ``rbd``, ``swift``, ``cinder`` or ``vsphere``. When present, Glance will attempt to store the disk image data in the backing store indicated by the value of the header. If the Glance node does not support the backing store, Glance will return a **400 Bad Request**. When not present, Glance will store the disk image data in the backing store that is marked as default. See the configuration option ``default_store`` for more information. * ``x-image-meta-disk_format`` This header is required, unless reserving an image. Valid values are one of ``aki``, ``ari``, ``ami``, ``raw``, ``iso``, ``vhd``, ``vhdx``, ``vdi``, ``qcow2``, ``vmdk`` or ``ploop``. For more information, see :ref:`formats`. * ``x-image-meta-container_format`` This header is required, unless reserving an image. Valid values are one of ``aki``, ``ari``, ``ami``, ``bare``, ``ova``, ``ovf``, or ``docker``. For more information, see :ref:`formats`. * ``x-image-meta-size`` This header is optional. When present, Glance assumes that the expected size of the request body will be the value of this header. If the length in bytes of the request body *does not match* the value of this header, Glance will return a **400 Bad Request**. When not present, Glance will calculate the image's size based on the size of the request body. * ``x-image-meta-checksum`` This header is optional. When present, it specifies the **MD5** checksum of the image file data. When present, Glance will verify the checksum generated from the back-end store while storing your image against this value and return a **400 Bad Request** if the values do not match. * ``x-image-meta-is_public`` This header is optional. 
When Glance finds the string "true" (case-insensitive), the image is marked as a public one, meaning that any user may view its metadata and may read the disk image from Glance. When not present, the image is assumed to be *not public* and owned by a user. * ``x-image-meta-min_ram`` This header is optional. When present, it specifies the minimum amount of RAM in megabytes required to run this image on a server. When not present, the image is assumed to have a minimum RAM requirement of 0. * ``x-image-meta-min_disk`` This header is optional. When present, it specifies the expected minimum disk space in gigabytes required to run this image on a server. When not present, the image is assumed to have a minimum disk space requirement of 0. * ``x-image-meta-owner`` This header is optional and only meaningful for admins. Glance sets the owner of an image to be the project of the authenticated user issuing the request. However, if the authenticated user has the Admin role, this default may be overridden by setting this header to null or to a string identifying the owner of the image. * ``x-image-meta-property-*`` When Glance receives any HTTP header whose key begins with the string prefix ``x-image-meta-property-``, Glance adds the key and value to a set of custom, free-form image properties stored with the image. The key is a lower-cased string following the prefix ``x-image-meta-property-`` with dashes and punctuation replaced with underscores. For example, if the following HTTP header were sent:: x-image-meta-property-distro Ubuntu 10.10 then a key/value pair of "distro"/"Ubuntu 10.10" will be stored with the image in Glance. There is no limit on the number of free-form key/value attributes that can be attached to the image. However, keep in mind that the 8K limit on the size of all the HTTP headers sent in a request will effectively limit the number of image properties. Update an Image *************** Glance will consider any HTTP header that it receives in a ``PUT`` request as an instance of image metadata. In this case, the header key should be prefixed with the strings ``x-image-meta-`` and ``x-image-meta-property-``. If an image was previously reserved, and thus is in the ``queued`` state, then image data can be added by including it as the request body. If the image already has data associated with it (for example, it is not in the ``queued`` state), then including a request body will result in a **409 Conflict** exception. On success, the ``PUT`` request will return the image metadata encoded as HTTP headers. See more about image statuses here: :ref:`image-statuses` List Image Memberships ********************** We want to see a list of projects that may access a given virtual machine image that the Glance server knows about. We take the `uri` field of the image data, append ``/members`` to it, and issue a ``GET`` request on the resulting URL. Continuing from the example above, in order to get the memberships for the first image returned, we can issue a ``GET`` request to the Glance server for ``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members``. And we will get back JSON data such as the following:: {'members': [ {'member_id': 'project1', 'can_share': false} ...]} The `member_id` field identifies a project with which the image is shared. If that project is authorized to further share the image, the `can_share` field is `true`. List Shared Images ****************** We want to see a list of images which are shared with a given project. 
We issue a ``GET`` request to ``http://glance.openstack.example.org/v1/shared-images/project1``. We will get back JSON data such as the following:: {'shared_images': [ {'image_id': '71c675ab-d94f-49cd-a114-e12490b328d9', 'can_share': false} ...]} The `image_id` field identifies an image shared with the project named by *member_id*. If the project is authorized to further share the image, the `can_share` field is `true`. Add a Member to an Image ************************ We want to authorize a project to access a private image. We issue a ``PUT`` request to ``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members/project1``. With no body, this will add the membership to the image, leaving existing memberships unmodified and defaulting new memberships to have `can_share` set to `false`. We may also optionally attach a body of the following form:: {'member': {'can_share': true} } If such a body is provided, both existing and new memberships will have `can_share` set to the provided value (either `true` or `false`). This query will return a 204 ("No Content") status code. Remove a Member from an Image ***************************** We want to revoke a project's right to access a private image. We issue a ``DELETE`` request to ``http://glance.openstack.example.org/v1/images/1/members/project1``. This query will return a 204 ("No Content") status code. Replace a Membership List for an Image ************************************** The full membership list for a given image may be replaced. We issue a ``PUT`` request to ``http://glance.openstack.example.org/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members`` with a body of the following form:: {'memberships': [ {'member_id': 'project1', 'can_share': false} ...]} All existing memberships which are not named in the replacement body are removed, and those which are named have their `can_share` settings changed as specified. (The `can_share` setting may be omitted, which will cause that setting to remain unchanged in the existing memberships.) All new memberships will be created, with `can_share` defaulting to `false` unless it is specified otherwise. Image Membership Changes in Version 2.0 --------------------------------------- Version 2.0 of the Images API eliminates the ``can_share`` attribute of image membership. In the version 2.0 model, image sharing is not transitive. In version 2.0, image members have a ``status`` attribute that reflects how the image should be treated with respect to that image member's image-list. * The ``status`` attribute may have one of three values: ``pending``, ``accepted``, or ``rejected``. * By default, only those shared images with status ``accepted`` are included in an image member's image-list. * Only an image member may change his/her own membership status. * Only an image owner may create members on an image. The status of a newly created image member is ``pending``. The image owner cannot change the status of a member. Distinctions from Version 1.x API Calls *************************************** * The response to a request to list the members of an image has changed. call: ``GET`` on ``/v2/images/{imageId}/members`` response: see the JSON schema at ``/v2/schemas/members`` * The request body in the call to create an image member has changed. call: ``POST`` to ``/v2/images/{imageId}/members`` request body:: { "member": "" } where the {memberId} is the project ID of the image member. The member status of a newly created image member is ``pending``. 
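For example, sharing an image with the project ``project1`` under the version 2.0 model is a single call; this sketch reuses the image ID from the earlier examples::

    $ curl -s -X POST \
        -H "X-Auth-Token: $OS_TOKEN" \
        -H "Content-Type: application/json" \
        -d '{"member": "project1"}' \
        http://glance.openstack.example.org/v2/images/71c675ab-d94f-49cd-a114-e12490b328d9/members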
New API Calls ************* * Change the status of an image member call: ``PUT`` on ``/v2/images/{imageId}/members/{memberId}`` request body:: { "status": "" } where is ``pending``, ``accepted``, or ``rejected``. The {memberId} is the project ID of the image member. Resource Limits --------------- A user should always expect that an HTTP 413 error could result from any operation, indicating that a resource consumption limit has been exceeded. Some of the common situations are detailed below: * Creating an image: If your administrator has limited the total number of images that you can have, an image create may fail. Delete another image to proceed. * Uploading data to an image: If you have exceeded the total amount of image storage space allocated to you, then an upload may be rejected. To proceed, delete another image of sufficient size. Images with multiple locations count multiple times against your quota, so deleting a location may also free up space. There is also a quota on the number of upload operations (staging, copying, and uploading all count against this) so you may need to wait for other operations to complete before proceeding. * Staging data for an image: There is a separate quota on the amount of data you may have staged at any given point. To proceed, finish importing other images first or delete an image with staged data. * Importing an image: If importing an image from staging to its final destination would overrun your total image storage quota, then the import may fail. To proceed, delete other images to make space. Of course, in all over-limit situations, requesting more quota from your administrator may be an option. Images v2 Stores API -------------------- Version 2.10 of the OpenStack Images API introduces new /v2/stores/ endpoint when multiple stores is configured. The endpoint is used to delete image from specific store. Delete from Store ***************** A user wants to delete image from specific store. The user issues a ``DELETE`` request to ``/v2/stores//``. NOTE: request body is not accepted. Images v2 Tasks API ------------------- Version 2 of the OpenStack Images API introduces a Task resource that is used to create and monitor long-running asynchronous image-related processes. See the :ref:`tasks` section of the Glance documentation for more information. The following Task calls are available: Create a Task ************* A user wants to initiate a task. The user issues a ``POST`` request to ``/v2/tasks``. The request body is of Content-type ``application/json`` and must contain the following fields: * ``type``: a string specified by the enumeration defined in the Task schema * ``input``: a JSON object. The content is defined by the cloud provider who has exposed the endpoint being contacted The response is a Task entity as defined by the Task schema. It includes an ``id`` field that can be used in a subsequent call to poll the task for status changes. A task is created in ``pending`` status. Show a Task *********** A user wants to see detailed information about a task the user owns. The user issues a ``GET`` request to ``/v2/tasks/{taskId}``. The response is in ``application/json`` format. The exact structure is given by the task schema located at ``/v2/schemas/task``. List Tasks ********** A user wants to see what tasks have been created in his or her project. The user issues a ``GET`` request to ``/v2/tasks``. The response is in ``application/json`` format. The exact structure is given by the task schema located at ``/v2/schemas/tasks``. 
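As a sketch, listing tasks and then showing one of them with curl looks like the following; the task ID in the second request is illustrative only::

    $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
        http://glance.openstack.example.org/v2/tasks
    $ curl -s -H "X-Auth-Token: $OS_TOKEN" \
        http://glance.openstack.example.org/v2/tasks/805f47d2-8814-4cd7-bef3-37037389a998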
Note that, as indicated by the schema, the list of tasks is provided in a sparse format. To see more information about a particular task in the list, the user would use the show task call described above. Filtering and Sorting the Tasks List ************************************ The ``GET /v2/tasks`` request takes query parameters that serve to filter the returned list of tasks. The following list details these query parameters. * ``status={status}`` Filters the list to display only those tasks in the specified status. See the task schema or the :ref:`task-statuses` section of this documentation for the legal values to use for ``{status}``. For example, a request to ``GET /v2/tasks?status=pending`` would return only those tasks whose current status is ``pending``. * ``type={type}`` Filters the list to display only those tasks of the specified type. See the enumeration defined in the task schema for the legal values to use for ``{type}``. For example, a request to ``GET /v2/tasks?type=import`` would return only import tasks. * ``sort_dir={direction}`` Sorts the list of tasks according to ``updated_at`` datetime. Legal values are ``asc`` (ascending) and ``desc`` (descending). By default, the task list is sorted by ``created_at`` time in descending chronological order. API Message Localization ------------------------ Glance supports HTTP message localization. For example, an HTTP client can receive API messages in Chinese even if the locale language of the server is English. How to use it ************* To receive localized API messages, the HTTP client needs to specify the **Accept-Language** header to indicate the language into which the message should be translated. For more information about Accept-Language, please refer to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html A typical curl API request looks like the following:: curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' http://glance.openstack.example.org/v2/images/aaa Then the response will be like the following:: HTTP/1.1 404 Not Found Content-Length: 234 Content-Type: text/html; charset=UTF-8 X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a Date: Sat, 22 Feb 2014 06:26:26 GMT <html> <head> <title>404 Not Found</title> </head> <body> <h1>404 Not Found</h1> 找不到任何具有标识 aaa 的映像<br /><br /> </body> </html>
.. note:: Make sure to have a language package under
   /usr/share/locale-langpack/ on the target Glance server.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/glanceclient.rst0000664000175000017500000000210200000000000021262 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Using Glance's Client Tools
===========================

The command-line tool and python library for Glance are both installed
through the python-glanceclient project. Explore the following resources for
more information:

* `Official Docs <https://docs.openstack.org/python-glanceclient/latest/>`_
* `Pypi Page <https://pypi.org/project/python-glanceclient/>`_
* `GitHub Project <https://github.com/openstack/python-glanceclient>`_

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/glancemetadefcatalogapi.rst0000664000175000017500000004601600000000000023446 0ustar00zuulzuul00000000000000.. Copyright (c) 2014 Hewlett-Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Using Glance's Metadata Definitions Catalog Public APIs
=======================================================

A common API hosted by the Glance service for vendors, admins, services, and
users to meaningfully define available key / value pair and tag metadata. The
intent is to enable better metadata collaboration across artifacts, services,
and projects for OpenStack users.

This is about the definition of the available metadata that can be used on
different types of resources (images, artifacts, volumes, flavors,
aggregates, etc). A definition includes the property's type, its key, its
description, and its constraints. This catalog will not store the values for
specific instance properties.

For example, a definition of a virtual CPU topology property for number of
cores will include the key to use, a description, and value constraints like
requiring it to be an integer. So, a user, potentially through Horizon, would
be able to search this catalog to list the available properties they can add
to a flavor or image. They will see the virtual CPU topology property in the
list and know that it must be an integer.

In the Horizon example, when the user adds the property, its key and value
will be stored in the service that owns that resource (Nova for flavors and
in Glance for images).

Diagram: https://wiki.openstack.org/w/images/b/bb/Glance-Metadata-API.png

Glance Metadata Definitions Catalog implementation started with API version
v2.
Authentication
--------------

Glance depends on Keystone and the OpenStack Identity API to handle
authentication of clients. You must obtain an authentication token from
Keystone and send it along with all API requests to Glance through the
``X-Auth-Token`` header. Glance will communicate back to Keystone to verify
the token validity and obtain your identity credentials.

See :ref:`authentication` for more information on integrating with Keystone.

Using v2.X
----------

For the purpose of examples, assume there is a Glance API server running at
the URL ``http://glance.openstack.example.org`` on the default port 80.

List Available Namespaces
*************************

We want to see a list of available namespaces that the authenticated user has
access to. This includes namespaces owned by the user, namespaces shared with
the user, and public namespaces.

We issue a ``GET`` request to
``http://glance.openstack.example.org/v2/metadefs/namespaces`` to retrieve
this list of available namespaces. The data is returned as a JSON-encoded
mapping in the following format::

    {
        "namespaces": [
            {
                "namespace": "MyNamespace",
                "display_name": "My User Friendly Namespace",
                "description": "My description",
                "visibility": "public",
                "protected": true,
                "owner": "The Test Owner",
                "self": "/v2/metadefs/namespaces/MyNamespace",
                "schema": "/v2/schemas/metadefs/namespace",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z",
                "resource_type_associations": [
                    {
                        "name": "OS::Nova::Aggregate",
                        "created_at": "2014-08-28T17:13:06Z",
                        "updated_at": "2014-08-28T17:13:06Z"
                    },
                    {
                        "name": "OS::Nova::Flavor",
                        "prefix": "aggregate_instance_extra_specs:",
                        "created_at": "2014-08-28T17:13:06Z",
                        "updated_at": "2014-08-28T17:13:06Z"
                    }
                ]
            }
        ],
        "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc",
        "schema": "/v2/schemas/metadefs/namespaces"
    }

.. note:: Listing namespaces will only show a summary of each namespace,
   including counts and resource type associations. A detailed response,
   including all of a namespace's object definitions, property definitions,
   etc., is only available from an individual GET namespace request.

Filtering Namespaces Lists
**************************

``GET /v2/metadefs/namespaces`` requests take query parameters that serve to
filter the returned list of namespaces. The following list details these
query parameters.

* ``resource_types=RESOURCE_TYPES``

  Filters namespaces having a ``resource_types`` within the list of comma
  separated ``RESOURCE_TYPES``.

The GET resource also accepts additional query parameters:

* ``sort_key=KEY``

  Results will be ordered by the specified sort attribute ``KEY``. Accepted
  values include ``namespace``, ``created_at`` (default) and ``updated_at``.

* ``sort_dir=DIR``

  Results will be sorted in the direction ``DIR``. Accepted values are
  ``asc`` for ascending or ``desc`` (default) for descending.

* ``marker=NAMESPACE``

  A namespace identifier marker may be specified. When present, only
  namespaces which occur after the identifier ``NAMESPACE`` will be listed,
  i.e. the namespaces which have a `sort_key` later than that of the marker
  ``NAMESPACE`` in the `sort_dir` direction.

* ``limit=LIMIT``

  When present, the maximum number of results returned will not exceed
  ``LIMIT``.

.. note:: If the specified ``LIMIT`` exceeds the operator defined limit
   (api_limit_max) then the number of results returned may be less than
   ``LIMIT``.

* ``visibility=PUBLIC``

  An admin user may use the `visibility` parameter to control which results
  are returned (PRIVATE or PUBLIC).
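For example, a filtered and sorted listing request might look like the
following sketch, assuming a valid token in ``$OS_TOKEN``::

    curl -s -H "X-Auth-Token: $OS_TOKEN" \
        "http://glance.openstack.example.org/v2/metadefs/namespaces?resource_types=OS::Nova::Flavor&sort_key=namespace&sort_dir=asc&limit=10"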
Retrieve Namespace
******************

We want to see more detailed information about a namespace that the
authenticated user has access to. The detail includes the properties,
objects, and resource type associations.

We issue a ``GET`` request to
``http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}`` to
retrieve the namespace details. The data is returned as a JSON-encoded
mapping in the following format::

    {
        "namespace": "MyNamespace",
        "display_name": "My User Friendly Namespace",
        "description": "My description",
        "visibility": "public",
        "protected": true,
        "owner": "The Test Owner",
        "schema": "/v2/schemas/metadefs/namespace",
        "resource_type_associations": [
            {
                "name": "OS::Glance::Image",
                "prefix": "hw_",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z"
            },
            {
                "name": "OS::Cinder::Volume",
                "prefix": "hw_",
                "properties_target": "image",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z"
            },
            {
                "name": "OS::Nova::Flavor",
                "prefix": "filter1:",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z"
            }
        ],
        "properties": {
            "nsprop1": {
                "title": "My namespace property1",
                "description": "More info here",
                "type": "boolean",
                "default": true
            },
            "nsprop2": {
                "title": "My namespace property2",
                "description": "More info here",
                "type": "string",
                "default": "value1"
            }
        },
        "objects": [
            {
                "name": "object1",
                "description": "my-description",
                "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1",
                "schema": "/v2/schemas/metadefs/object",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z",
                "required": [],
                "properties": {
                    "prop1": {
                        "title": "My object1 property1",
                        "description": "More info here",
                        "type": "array",
                        "items": {
                            "type": "string"
                        }
                    }
                }
            },
            {
                "name": "object2",
                "description": "my-description",
                "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2",
                "schema": "/v2/schemas/metadefs/object",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z",
                "properties": {
                    "prop1": {
                        "title": "My object2 property1",
                        "description": "More info here",
                        "type": "integer",
                        "default": 20
                    }
                }
            }
        ]
    }

Retrieve available Resource Types
*********************************

We want to see the list of all resource types that are available in Glance.

We issue a ``GET`` request to
``http://glance.openstack.example.org/v2/metadefs/resource_types`` to
retrieve all resource types. The data is returned as a JSON-encoded mapping
in the following format::

    {
        "resource_types": [
            {
                "created_at": "2014-08-28T17:13:04Z",
                "name": "OS::Glance::Image",
                "updated_at": "2014-08-28T17:13:04Z"
            },
            {
                "created_at": "2014-08-28T17:13:04Z",
                "name": "OS::Cinder::Volume",
                "updated_at": "2014-08-28T17:13:04Z"
            },
            {
                "created_at": "2014-08-28T17:13:04Z",
                "name": "OS::Nova::Flavor",
                "updated_at": "2014-08-28T17:13:04Z"
            },
            {
                "created_at": "2014-08-28T17:13:04Z",
                "name": "OS::Nova::Aggregate",
                "updated_at": "2014-08-28T17:13:04Z"
            },
            {
                "created_at": "2014-08-28T17:13:04Z",
                "name": "OS::Nova::Server",
                "updated_at": "2014-08-28T17:13:04Z"
            }
        ]
    }
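A minimal request might look like the following sketch (``$OS_TOKEN`` is
assumed to hold a valid token)::

    curl -s -H "X-Auth-Token: $OS_TOKEN" \
        http://glance.openstack.example.org/v2/metadefs/resource_types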
Retrieve Resource Types associated with a Namespace
***************************************************

We want to see the list of resource types that are associated with a specific
namespace.

We issue a ``GET`` request to
``http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/resource_types``
to retrieve resource types. The data is returned as a JSON-encoded mapping in
the following format::

    {
        "resource_type_associations": [
            {
                "name": "OS::Glance::Image",
                "prefix": "hw_",
                "created_at": "2014-08-28T17:13:04Z",
                "updated_at": "2014-08-28T17:13:04Z"
            },
            {
                "name": "OS::Cinder::Volume",
                "prefix": "hw_",
                "properties_target": "image",
                "created_at": "2014-08-28T17:13:04Z",
                "updated_at": "2014-08-28T17:13:04Z"
            },
            {
                "name": "OS::Nova::Flavor",
                "prefix": "hw:",
                "created_at": "2014-08-28T17:13:04Z",
                "updated_at": "2014-08-28T17:13:04Z"
            }
        ]
    }

Add Namespace
*************

We want to create a new namespace that can contain the properties, objects,
etc.

We issue a ``POST`` request to add a namespace to Glance::

    POST http://glance.openstack.example.org/v2/metadefs/namespaces/

The input data is a JSON-encoded mapping in the following format::

    {
        "namespace": "MyNamespace",
        "display_name": "My User Friendly Namespace",
        "description": "My description",
        "visibility": "public",
        "protected": true
    }

.. note:: Optionally, properties, objects and resource type associations
   could be added in the same input. See the GET Namespace output above
   (the input will be similar).

Update Namespace
****************

We want to update an existing namespace.

We issue a ``PUT`` request to update a namespace in Glance::

    PUT http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}

The input data is similar to Add Namespace.

Delete Namespace
****************

We want to delete an existing namespace, including all its objects,
properties, etc.

We issue a ``DELETE`` request to delete a namespace in Glance::

    DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}

Associate Resource Type with Namespace
**************************************

We want to associate a resource type with an existing namespace.

We issue a ``POST`` request to Glance to associate a resource type::

    POST http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/resource_types

The input data is a JSON-encoded mapping in the following format::

    {
        "name": "OS::Cinder::Volume",
        "prefix": "hw_",
        "properties_target": "image",
        "created_at": "2014-08-28T17:13:04Z",
        "updated_at": "2014-08-28T17:13:04Z"
    }

Remove Resource Type associated with a Namespace
************************************************

We want to de-associate a namespace from a resource type.

We issue a ``DELETE`` request to Glance to de-associate the resource type
from the namespace::

    DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/resource_types/{resource_type}
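As an illustrative sketch (assuming ``MyNamespace`` from the earlier examples
and a valid token in ``$OS_TOKEN``), the de-association request might look
like::

    curl -i -X DELETE -H "X-Auth-Token: $OS_TOKEN" \
        http://glance.openstack.example.org/v2/metadefs/namespaces/MyNamespace/resource_types/OS::Nova::Flavor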
List Objects in Namespace
*************************

We want to see the list of meta definition objects in a specific namespace.

We issue a ``GET`` request to
``http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects``
to retrieve objects. The data is returned as a JSON-encoded mapping in the
following format::

    {
        "objects": [
            {
                "name": "object1",
                "description": "my-description",
                "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1",
                "schema": "/v2/schemas/metadefs/object",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z",
                "required": [],
                "properties": {
                    "prop1": {
                        "title": "My object1 property1",
                        "description": "More info here",
                        "type": "array",
                        "items": {
                            "type": "string"
                        }
                    }
                }
            },
            {
                "name": "object2",
                "description": "my-description",
                "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2",
                "schema": "/v2/schemas/metadefs/object",
                "created_at": "2014-08-28T17:13:06Z",
                "updated_at": "2014-08-28T17:13:06Z",
                "properties": {
                    "prop1": {
                        "title": "My object2 property1",
                        "description": "More info here",
                        "type": "integer",
                        "default": 20
                    }
                }
            }
        ],
        "schema": "/v2/schemas/metadefs/objects"
    }

Add object in a specific namespace
**********************************

We want to create a new object which can group the properties.

We issue a ``POST`` request to add an object to a namespace in Glance::

    POST http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects

The input data is a JSON-encoded mapping in the following format::

    {
        "name": "StorageQOS",
        "description": "Our available storage QOS.",
        "required": [
            "minIOPS"
        ],
        "properties": {
            "minIOPS": {
                "type": "integer",
                "description": "The minimum IOPs required",
                "default": 100,
                "minimum": 100,
                "maximum": 30000
            },
            "burstIOPS": {
                "type": "integer",
                "description": "The expected burst IOPs",
                "default": 1000,
                "minimum": 100,
                "maximum": 30000
            }
        }
    }

Update Object in a specific namespace
*************************************

We want to update an existing object.

We issue a ``PUT`` request to update an object in Glance::

    PUT http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects/{object_name}

The input data is similar to Add Object.

Delete Object in a specific namespace
*************************************

We want to delete an existing object.

We issue a ``DELETE`` request to delete an object in a namespace in Glance::

    DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/objects/{object_name}

Add property definition in a specific namespace
***********************************************

We want to create a new property definition in a namespace.

We issue a ``POST`` request to add a property definition to a namespace in
Glance::

    POST http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/properties

The input data is a JSON-encoded mapping in the following format::

    {
        "name": "hypervisor_type",
        "title": "Hypervisor",
        "type": "array",
        "description": "The type of hypervisor required",
        "items": {
            "type": "string",
            "enum": [
                "hyperv",
                "qemu",
                "kvm"
            ]
        }
    }
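As a sketch (assuming a valid token in ``$OS_TOKEN`` and the JSON above saved
to a hypothetical local file named ``property.json``), the request might look
like::

    curl -i -X POST \
        -H "X-Auth-Token: $OS_TOKEN" \
        -H "Content-Type: application/json" \
        -d @property.json \
        http://glance.openstack.example.org/v2/metadefs/namespaces/MyNamespace/properties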
Update property definition in a specific namespace
**************************************************

We want to update an existing property definition.

We issue a ``PUT`` request to update a property definition in a namespace in
Glance::

    PUT http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/properties/{property_name}

The input data is similar to Add property definition.

Delete property definition in a specific namespace
**************************************************

We want to delete an existing property definition.

We issue a ``DELETE`` request to delete a property definition in a namespace
in Glance::

    DELETE http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}/properties/{property_name}

API Message Localization
------------------------

Glance supports HTTP message localization. For example, an HTTP client can
receive API messages in Chinese even if the locale language of the server is
English.

How to use it
*************

To receive localized API messages, the HTTP client needs to specify the
**Accept-Language** header to indicate the language to use to translate the
message. For more information about Accept-Language, please refer to
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

A typical curl API request looks like the following::

    curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' http://glance.openstack.example.org/v2/metadefs/namespaces/{namespace}

Then the response will be like the following::

    HTTP/1.1 404 Not Found
    Content-Length: 234
    Content-Type: text/html; charset=UTF-8
    X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a
    Date: Sat, 22 Feb 2014 06:26:26 GMT

    <html>
     <head>
      <title>404 Not Found</title>
     </head>
     <body>
      <h1>404 Not Found</h1>
      找不到任何具有标识 aaa 的映像<br /><br />
     </body>
    </html>

The body of the response contains the localized message; in English, it says
that no image with identifier ``aaa`` could be found.
.. note:: Be sure there is a language package under
   /usr/share/locale-langpack/ on the target Glance server.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/identifiers.rst0000664000175000017500000000201400000000000021135 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Image Identifiers
=================

Images are uniquely identified by way of a URI that matches the following
signature::

    <Glance Server Location>/v1/images/<ID>

where ``<Glance Server Location>`` is the resource location of the Glance
service that knows about an image, and ``<ID>`` is the image's identifier.
Image identifiers in Glance are *uuids*, making them *globally unique*.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/index.rst0000664000175000017500000000040000000000000017740 0ustar00zuulzuul00000000000000
=================
Glance User Guide
=================

.. toctree::
   :maxdepth: 2

   identifiers
   statuses
   formats
   common-image-properties
   metadefs-concepts
   glanceapi
   glanceclient
   glancemetadefcatalogapi
   signature
   os_hash_algo

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/metadefs-concepts.rst0000664000175000017500000002134200000000000022241 0ustar00zuulzuul00000000000000.. Copyright (c) 2014 Hewlett-Packard Development Company, L.P. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Metadata Definition Concepts
============================

The metadata definition service was added to Glance in the Juno release of
OpenStack.

It provides a common API for vendors, admins, services, and users to
meaningfully **define** available key / value pair metadata that can be used
on different types of resources (images, artifacts, volumes, flavors,
aggregates, and other resources). A definition includes a property's key, its
description, its constraints, and the resource types to which it can be
associated.

This catalog does not store the values for specific instance properties.

For example, a definition of a virtual CPU topology property for the number
of cores will include the base key to use (for example, cpu_cores), a
description, and value constraints like requiring it to be an integer. So, a
user, potentially through Horizon, would be able to search this catalog to
list the available properties they can add to a flavor or image.
They will see the virtual CPU topology property in the list and know that it
must be an integer. When the user adds the property, its key and value will
be stored in the service that owns that resource (for example, Nova for
flavors and in Glance for images).

The catalog also includes any additional prefix required when the property is
applied to different types of resources, such as "hw\_" for images and "hw:"
for flavors. So, on an image, the user would know to set the property as
"hw_cpu_cores=1".

.. note:: Resource manipulation via this API is restricted to admins by
   default since the Wallaby release. This API does not provide limits
   suitable for exposure to all users, and can also leak information between
   users unintentionally. Even as an admin, be careful with the names you use
   for resources you create that are intended to be private, in order to
   avoid unintentional exposure. See Bug 1916926_ for more information.

.. _1916926: https://bugs.launchpad.net/glance/+bug/1916926/

Terminology
-----------

Background
~~~~~~~~~~

The term *metadata* can become very overloaded and confusing. This catalog is
about the additional metadata that is passed as arbitrary key / value pairs
or tags across various artifacts and OpenStack services.

Below are a few examples of the various terms used for metadata across
OpenStack services today:

+-------------------------+---------------------------+----------------------+
| Nova                    | Cinder                    | Glance               |
+=========================+===========================+======================+
| Flavor                  | Volume & Snapshot         | Image & Snapshot     |
|  + *extra specs*        |  + *image metadata*       |  + *properties*      |
| Host Aggregate          |  + *metadata*             |  + *tags*            |
|  + *metadata*           | VolumeType                |                      |
| Servers                 |  + *extra specs*          |                      |
|  + *metadata*           |  + *qos specs*            |                      |
|  + *scheduler_hints*    |                           |                      |
|  + *tags*               |                           |                      |
+-------------------------+---------------------------+----------------------+

Catalog Concepts
~~~~~~~~~~~~~~~~

The below figure illustrates the concept terminology used in the metadata
definitions catalog::

    A namespace is associated with 0 to many resource types, making it
    visible to the API / UI for applying to that type of resource. RBAC
    Permissions are managed at a namespace level.

    +----------------------------------------------+
    | Namespace                                    |
    |                                              |
    | +-----------------------------------------+  |
    | | Object Definition                       |  |
    | |                                         |  |       +--------------------+
    | | +-------------------------------------+ |  | +-->  | Resource Type:     |
    | | | Property Definition A (key=integer) | |  | |     | e.g. Nova Flavor   |
    | | +-------------------------------------+ |  | |     +--------------------+
    | |                                         |  | |
    | | +-------------------------------------+ |  | |     +--------------------+
    | | | Property Definition B (key=string)  | |  +-+-->  | Resource Type:     |
    | | +-------------------------------------+ |  | |     | e.g. Glance Image  |
    | |                                         |  | |     +--------------------+
    | +-----------------------------------------+  | |
    |                                              | |
    | +-------------------------------------+      | |     +--------------------+
    | | Property Definition C (key=boolean) |      | +-->  | Resource Type:     |
    | +-------------------------------------+      |       | e.g. Cinder Volume |
    |                                              |       +--------------------+
    +----------------------------------------------+

    Properties may be defined standalone or within the context of an object.

Catalog Terminology
~~~~~~~~~~~~~~~~~~~

The following terminology is used within the metadata definition catalog.

**Namespaces**

Metadata definitions are contained in namespaces.

- Specify the access controls (CRUD) for everything defined in it.
Allows for admin only, different projects, or the entire cloud to define and use the definitions in the namespace - Associates the contained definitions to different types of resources **Properties** A property describes a single property and its primitive constraints. Each property can ONLY be a primitive type: * string, integer, number, boolean, array Each primitive type is described using simple JSON schema notation. This means NO nested objects and no definition referencing. **Objects** An object describes a group of one to many properties and their primitive constraints. Each property in the group can ONLY be a primitive type: * string, integer, number, boolean, array Each primitive type is described using simple JSON schema notation. This means NO nested objects. The object may optionally define required properties under the semantic understanding that a user who uses the object should provide all required properties. **Resource Type Association** Resource type association specifies the relationship between resource types and the namespaces that are applicable to them. This information can be used to drive UI and CLI views. For example, the same namespace of objects, properties, and tags may be used for images, snapshots, volumes, and flavors. Or a namespace may only apply to images. Resource types should be aligned with Heat resource types whenever possible. https://docs.openstack.org/heat/latest/template_guide/openstack.html It is important to note that the same base property key can require different prefixes depending on the target resource type. The API provides a way to retrieve the correct property based on the target resource type. Below are a few examples: The desired virtual CPU topology can be set on both images and flavors via metadata. The keys have different prefixes on images than on flavors. On flavors keys are prefixed with ``hw:``, but on images the keys are prefixed with ``hw_``. For more: https://github.com/openstack/nova-specs/blob/master/specs/juno/implemented/virt-driver-vcpu-topology.rst Another example is the AggregateInstanceExtraSpecsFilter and scoped properties (e.g. properties with something:something=value). For scoped / namespaced properties, the AggregateInstanceExtraSpecsFilter requires a prefix of "aggregate_instance_extra_specs:" to be used on flavors but not on the aggregate itself. Otherwise, the filter will not evaluate the property during scheduling. So, on a host aggregate, you may see: companyx:fastio=true But then when used on the flavor, the AggregateInstanceExtraSpecsFilter needs: aggregate_instance_extra_specs:companyx:fastio=true In some cases, there may be multiple different filters that may use the same property with different prefixes. In this case, the correct prefix needs to be set based on which filter is enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/os_hash_algo.rst0000664000175000017500000001111200000000000021255 0ustar00zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ========================================= Secure Hash Algorithm Support (Multihash) ========================================= The Secure Hash Algorithm feature adds image properties that may be used to verify image integrity based on its hash. 
The Secure Hash consists of two new image properties: ``os_hash_algo`` Contains the name of the secure hash algorithm used to generate the value on the image ``os_hash_value`` The hexdigest computed by applying the secure hash algorithm named in the ``os_hash_algo`` property to the image data Image Verification ================== When Secure Hash is used, the Glance image properties will include the two fields ``os_hash_algo`` and ``os_hash_value``. These two fields provide the hashing algorithm used to calculate the secure hash, along with the hash value calculated for the image. These values can be used to verify the image integrity when used. For example, an image and its properties may be viewed with the following:: $ glance image-show fa33e3cd-5fe4-46df-a604-1e9b9438b420 +------------------+----------------------------------------------------------------------------------+ | Property | Value | +------------------+----------------------------------------------------------------------------------+ | checksum | ffa3dd42fae539dcd8fe72d429bc677b | | container_format | bare | | created_at | 2019-06-05T13:39:46Z | | disk_format | qcow2 | | id | fa33e3cd-5fe4-46df-a604-1e9b9438b420 | | min_disk | 10 | | min_ram | 1024 | | name | fedora-30 | | os_hash_algo | sha512 | | os_hash_value | d9f99d22a6b6ea1e8b93379dd2080f51a7ed6885aa7d4c2f2262ea1054935e02c47b45f9b56aa7f5 | | | 5e61d149d06f4ff6de03efde24f9d6774baf35f08c5e9d92 | | os_hidden | False | | owner | 0e82e8f863a4485fabfbed1b5b856cd7 | | protected | False | | size | 332267520 | | status | active | | tags | [] | | updated_at | 2019-06-07T11:41:12Z | | virtual_size | Not available | | visibility | public | +------------------+----------------------------------------------------------------------------------+ From that output, we can see the ``os_hash_algo`` property shows that **sha512** was used to generate the multihash. The ``os_hash_value`` then shows the generated hash value is:: d9f99d22a6b6ea1e8b93379dd2080f51a7ed6885aa7d4c2f2262ea1054935e02c47b45f9b56aa7f55e61d149d06f4ff6de03efde24f9d6774baf35f08c5e9d92 When downloading the image, you may now use these values to be able to verify the integrity of the image. For example:: $ glance image-download fa33e3cd-5fe4-46df-a604-1e9b9438b420 --file fedora-30 $ sha512sum fedora-30 d9f99d22a6b6ea1e8b93379dd2080f51a7ed6885aa7d4c2f2262ea1054935e02c47b45f9b56aa7f55e61d149d06f4ff6de03efde24f9d6774baf35f08c5e9d92 Using the ``sha512sum`` command, we are able to calculate the hash locally on the image and verify it matches what was expected. If the output were not to match, that would indicate the image has somehow been modified or corrupted since being uploaded to Glance, and should likely not be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/signature.rst0000664000175000017500000001421100000000000020633 0ustar00zuulzuul00000000000000.. Copyright 2016 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Image Signature Verification
============================

Glance has the ability to perform image validation using a digital signature
and asymmetric cryptography. To trigger this, you must define specific image
properties (described below), and have stored a certificate signed with your
private key in a local Barbican installation.

When the image properties exist on an image, Glance will validate the
uploaded image data against these properties before storing it. If validation
is unsuccessful, the upload will fail and the image will be deleted.

Additionally, the image properties may be used by other services (for
example, Nova) to perform data verification when the image is downloaded from
Glance.

Requirements
------------

Barbican key manager - See
https://docs.openstack.org/barbican/latest/contributor/devstack.html

Configuration
-------------

The ``etc/glance-api.conf`` file can be modified to change the Keystone
endpoint used by Barbican. By default, Barbican will try to connect to
Keystone at http://localhost:5000/v3, but if Keystone is on another host,
this should be changed.

In glance-api.conf find the following lines::

    [barbican]
    auth_endpoint = http://localhost:5000/v3

Then replace http://localhost:5000/v3 with the URL of Keystone, also adding
/v3 to the end of it. For example, 'https://192.168.245.9:5000/v3'.

Another option that can be configured in ``etc/glance-api.conf`` is which key
manager to use. By default, Glance will use the default key manager defined
by the Castellan key manager interface, which is currently the Barbican key
manager.

In glance-api.conf find the following lines::

    [key_manager]
    backend = barbican

Then replace the value with the desired key manager class.

.. note:: If those lines do not exist then simply add them to the end of the
   file.

Using the Signature Verification
--------------------------------

An image needs a few properties for signature verification to be enabled.
These are::

    img_signature
    img_signature_hash_method
    img_signature_key_type
    img_signature_certificate_uuid

Property img_signature
~~~~~~~~~~~~~~~~~~~~~~

This is the signature of your image.

.. note:: The maximum character limit is 255.

Property img_signature_hash_method
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the hash method used when generating the signature. The methods you
can currently use are:

* SHA-224
* SHA-256
* SHA-384
* SHA-512

Property img_signature_key_type
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the key type used for your image signature. The key types you can
currently use are:

* RSA-PSS
* DSA
* ECC-CURVES
* SECT571K1
* SECT409K1
* SECT571R1
* SECT409R1
* SECP521R1
* SECP384R1

.. Note:: ECC curves - Only keysizes above 384 are included. Not all ECC
   curves may be supported by the back end.

Property img_signature_certificate_uuid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the UUID of the certificate that you upload to Barbican. Therefore,
the type passed to Glance is:

* UUID

.. Note:: The supported certificate types are:

   * X_509

Example Usage
-------------

Follow these instructions to create your keys::

    $ openssl genrsa -out private_key.pem 1024
    Generating RSA private key, 1024 bit long modulus
    ...............................................++++++
    ..++++++
    e is 65537 (0x10001)

    $ openssl rsa -pubout -in private_key.pem -out public_key.pem
    writing RSA key

    $ openssl req -new -key private_key.pem -out cert_request.csr
    You are about to be asked to enter information that will be incorporated
    into your certificate request.
$ openssl x509 -req -days 14 -in cert_request.csr -signkey private_key.pem -out new_cert.crt Signature ok subject=/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd Getting Private key Upload your certificate. This only has to be done once as you can use the same ``Secret href`` for many images until it expires. .. code-block:: console $ openstack secret store --name test --algorithm RSA --expiration 2016-06-29 --secret-type certificate --payload-content-type "application/octet-stream" --payload-content-encoding base64 --payload "$(base64 new_cert.crt)" +---------------+-----------------------------------------------------------------------+ | Field | Value | +---------------+-----------------------------------------------------------------------+ | Secret href | http://127.0.0.1:9311/v1/secrets/cd7cc675-e573-419c-8fff-33a72734a243 | $ cert_uuid=cd7cc675-e573-419c-8fff-33a72734a243 Get an image and create the signature:: $ echo This is a dodgy image > myimage $ openssl dgst -sha256 -sign private_key.pem -sigopt rsa_padding_mode:pss -out myimage.signature myimage $ base64 -w 0 myimage.signature > myimage.signature.b64 $ image_signature=$(cat myimage.signature.b64) .. note:: Using Glance v1 requires '-w 0' due to not supporting multiline image properties. Glance v2 does support multiline image properties and does not require '-w 0' but may still be used. Create the image:: $ glance image-create --name mySignedImage --container-format bare --disk-format qcow2 --property img_signature="$image_signature" --property img_signature_certificate_uuid="$cert_uuid" --property img_signature_hash_method='SHA-256' --property img_signature_key_type='RSA-PSS' < myimage .. note:: Creating the image can fail if validation does not succeed. This will cause the image to be deleted. Other Links ----------- * https://etherpad.openstack.org/p/mitaka-glance-image-signing-instructions * https://wiki.openstack.org/wiki/OpsGuide/User-Facing_Operations ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/source/user/statuses.rst0000664000175000017500000001361300000000000020512 0ustar00zuulzuul00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _image-statuses: Image Statuses ============== Images in Glance can be in one of the following statuses: * ``queued`` The image identifier has been reserved for an image in the Glance registry. No image data has been uploaded to Glance and the image size was not explicitly set to zero on creation. * ``saving`` Denotes that an image's raw data is currently being uploaded to Glance. When an image is registered with a call to `POST /images` and there is an `x-image-meta-location` header present, that image will never be in the `saving` status (as the image data is already available in some other location). * ``uploading`` Denotes that an import data-put call has been made. While in this status, a call to `PUT /file` is disallowed. 
(Note that a call to `PUT /file` on a queued image puts the image into saving
status. Calls to `PUT /stage` are disallowed while an image is in saving
status. Thus it's not possible to use both upload methods on the same image.)

* ``importing``

  Denotes that an import call has been made but that the image is not yet
  ready for use.

* ``active``

  Denotes an image that is fully available in Glance. This occurs when the
  image data is uploaded, or the image size is explicitly set to zero on
  creation.

* ``deactivated``

  Denotes that access to image data is not allowed to any non-admin user.
  Prohibiting downloads of an image also prohibits operations like image
  export and image cloning that may require image data.

* ``killed``

  Denotes that an error occurred during the uploading of an image's data, and
  that the image is not readable.

* ``deleted``

  Glance has retained the information about the image, but it is no longer
  available to use. An image in this state will be removed automatically at a
  later date.

* ``pending_delete``

  This is similar to `deleted`, however, Glance has not yet removed the image
  data. An image in this state is not recoverable.

.. figure:: ../images/image_status_transition.png
   :figwidth: 100%
   :align: center
   :alt: The states consist of: "queued", "saving", "active",
         "pending_delete", "deactivated", "uploading", "importing",
         "killed", and "deleted". The transitions consist of: An initial
         transition to the "queued" state called "create image". A transition
         from the "queued" state to the "active" state called "add location".
         A transition from the "queued" state to the "saving" state called
         "upload". A transition from the "queued" state to the "uploading"
         state called "stage upload". A transition from the "queued" state to
         the "deleted" state called "delete". A transition from the "saving"
         state to the "active" state called "upload succeeded". A transition
         from the "saving" state to the "deleted" state called "delete". A
         transition from the "saving" state to the "killed" state called
         "[v1] upload fail". A transition from the "saving" state to the
         "queued" state called "[v2] upload fail". A transition from the
         "uploading" state to the "importing" state called "import". A
         transition from the "uploading" state to the "queued" state called
         "stage upload fail". A transition from the "uploading" state to the
         "deleted" state called "delete". A transition from the "importing"
         state to the "active" state called "import succeed". A transition
         from the "importing" state to the "queued" state called "import
         fail". A transition from the "importing" state to the "deleted"
         state called "delete". A transition from the "active" state to the
         "deleted" state called "delete". A transition from the "active"
         state to the "pending_delete" state called "delayed delete". A
         transition from the "active" state to the "deactivated" state called
         "deactivate". A transition from the "killed" state to the "deleted"
         state called "deleted". A transition from the "pending_delete" state
         to the "deleted" state called "after scrub time". A transition from
         the "deactivated" state to the "deleted" state called "delete". A
         transition from the "deactivated" state to the "active" state called
         "reactivate". There are no transitions out of the "deleted" state.

   This is a representation of how the image moves from one status to the
   next.

   * Add location from zero to more than one.

..
_task-statuses: Task Statuses ============= Tasks in Glance can be in one of the following statuses: * ``pending`` The task identifier has been reserved for a task in the Glance. No processing has begun on it yet. * ``processing`` The task has been picked up by the underlying executor and is being run using the backend Glance execution logic for that task type. * ``success`` Denotes that the task has had a successful run within Glance. The ``result`` field of the task shows more details about the outcome. * ``failure`` Denotes that an error occurred during the execution of the task and it cannot continue processing. The ``message`` field of the task shows what the error was. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8302999 glance-29.0.0/doc/test/0000775000175000017500000000000000000000000014602 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/doc/test/redirect-tests.txt0000664000175000017500000001102000000000000020276 0ustar00zuulzuul00000000000000# This file contains tests for redirects to handle existing URLs for docs that # have been moved. See https://docs.openstack.org/whereto/latest/ for details. ### test files moved by commit 1c7f556d4f77d2dd7f282f2b41bdfb2abb6e5740 # to: admin /glance/austin/apache-httpd.html 301 /glance/austin/admin/apache-httpd.html /glance/bexar/authentication.html 301 /glance/bexar/admin/authentication.html /glance/cactus/cache.html 301 /glance/cactus/admin/cache.html /glance/diablo/controllingservers.html 301 /glance/diablo/admin/controllingservers.html /glance/essex/db-sqlalchemy-migrate.html 301 /glance/essex/admin/db-sqlalchemy-migrate.html /glance/folsom/db.html 301 /glance/folsom/admin/db.html /glance/grizzly/flows.html 301 /glance/grizzly/admin/flows.html /glance/havana/notifications.html 301 /glance/havana/admin/notifications.html /glance/icehouse/policies.html 301 /glance/icehouse/admin/policies.html /glance/juno/property-protections.html 301 /glance/juno/admin/property-protections.html /glance/kilo/requirements.html 301 /glance/kilo/admin/requirements.html /glance/liberty/rollingupgrades.html 301 /glance/liberty/admin/rollingupgrades.html /glance/mitaka/tasks.html 301 /glance/mitaka/admin/tasks.html # to: configuration /glance/newton/configuring.html 301 /glance/newton/configuration/configuring.html /glance/ocata/opts/glance_api.html 301 /glance/ocata/configuration/glance_api.html /glance/pike/opts/glance_cache.html 301 /glance/pike/configuration/glance_cache.html /glance/queens/opts/glance_manage.html 301 /glance/queens/configuration/glance_manage.html /glance/rocky/opts/glance_registry.html 301 /glance/rocky/configuration/glance_registry.html /glance/stein/opts/glance_scrubber.html 301 /glance/stein/configuration/glance_scrubber.html /glance/thompson/opts/index.html 301 /glance/thompson/configuration/index.html /glance/uvula/sample-configuration.html 301 /glance/uvula/configuration/sample-configuration.html # to: contributor /glance/violin/architecture.html 301 /glance/violin/contributor/architecture.html /glance/watt/contributing/blueprints.html 301 /glance/watt/contributor/blueprints.html /glance/xylophone/database_architecture.html 301 /glance/xylophone/contributor/database_architecture.html /glance/yaml/database_migrations.html 301 /glance/yaml/contributor/database_migrations.html /glance/zero/contributing/documentation.html 301 /glance/zero/contributor/documentation.html 
/glance/latest/domain_implementation.html 301 /glance/latest/contributor/domain_implementation.html /glance/latest/domain_model.html 301 /glance/latest/contributor/domain_model.html /glance/latest/contributing/index.html 301 /glance/latest/contributor/index.html /glance/latest/contributing/minor-code-changes.html 301 /glance/latest/contributor/minor-code-changes.html /glance/latest/contributing/refreshing-configs.html 301 /glance/latest/contributor/refreshing-configs.html /glance/latest/contributing/release-cpl.html 301 /glance/latest/contributor/release-cpl.html # to: user /glance/latest/common-image-properties.html 301 /glance/latest/user/common-image-properties.html /glance/latest/formats.html 301 /glance/latest/user/formats.html /glance/latest/glanceapi.html 301 /glance/latest/user/glanceapi.html /glance/latest/glanceclient.html 301 /glance/latest/user/glanceclient.html /glance/latest/glancemetadefcatalogapi.html 301 /glance/latest/user/glancemetadefcatalogapi.html /glance/latest/identifiers.html 301 /glance/latest/user/identifiers.html /glance/latest/metadefs-concepts.html 301 /glance/latest/user/metadefs-concepts.html /glance/latest/signature.html 301 /glance/latest/user/signature.html /glance/latest/statuses.html 301 /glance/latest/user/statuses.html # to: cli /glance/latest/man/glanceapi.html 301 /glance/latest/cli/glanceapi.html /glance/latest/man/glancecachecleaner.html 301 /glance/latest/cli/glancecachecleaner.html /glance/latest/man/glancecachemanage.html 301 /glance/latest/cli/glancecachemanage.html /glance/latest/man/glancecacheprefetcher.html 301 /glance/latest/cli/glancecacheprefetcher.html /glance/latest/man/glancecachepruner.html 301 /glance/latest/cli/glancecachepruner.html /glance/latest/man/glancecontrol.html 301 /glance/latest/cli/glancecontrol.html /glance/latest/man/glancemanage.html 301 /glance/latest/cli/glancemanage.html /glance/latest/man/glanceregistry.html 301 /glance/latest/cli/glanceregistry.html /glance/latest/man/glancereplicator.html 301 /glance/latest/cli/glancereplicator.html /glance/latest/man/glancescrubber.html 301 /glance/latest/cli/glancescrubber.html ### end: test files moved by commit 1c7f556d4f77d2dd7f282f2b41bdfb2abb6e5740 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8302999 glance-29.0.0/etc/0000775000175000017500000000000000000000000013631 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/glance-api-paste.ini0000664000175000017500000000630600000000000017451 0ustar00zuulzuul00000000000000# Use this composite for no auth or image caching - DEFAULT [composite:glance-api] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck # Use this composite for image caching and no auth [composite:glance-api-caching] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck # Use this composite for caching w/ management interface but no auth [composite:glance-api-cachemanagement] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck # Use this composite for keystone auth [composite:glance-api-keystone] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck # Use this composite for keystone auth with image caching [composite:glance-api-keystone+caching] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck # Use this composite for 
keystone auth with caching and cache management
[composite:glance-api-keystone+cachemanagement]
paste.composite_factory = glance.api:root_app_factory
/: api
/healthcheck: healthcheck

[composite:api]
paste.composite_factory = glance.api:pipeline_factory
default = cors http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context rootapp
caching = cors http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache rootapp
cachemanagement = cors http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp
keystone = cors http_proxy_to_wsgi versionnegotiation osprofiler authtoken context rootapp
keystone+caching = cors http_proxy_to_wsgi versionnegotiation osprofiler authtoken context cache rootapp
keystone+cachemanagement = cors http_proxy_to_wsgi versionnegotiation osprofiler authtoken context cache cachemanage rootapp

[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v2: apiv2app

[app:apiversions]
paste.app_factory = glance.api.versions:create_resource

[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory

[app:healthcheck]
paste.app_factory = oslo_middleware:Healthcheck.app_factory
backends = disable_by_file
disable_by_file_path = /etc/glance/healthcheck_disable

[filter:versionnegotiation]
paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory

[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory

[filter:cachemanage]
paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory

[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory

[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
delay_auth_decision = true

[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory

[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory

[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = glance
oslo_config_program = glance-api

[filter:http_proxy_to_wsgi]
paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/glance-api.conf0000664000175000017500000062060300000000000016507 0ustar00zuulzuul00000000000000[DEFAULT]

#
# From glance.api
#

#
# Allow limited access to unauthenticated users.
#
# Assign a boolean to determine API access for unauthenticated
# users. When set to False, the API cannot be accessed by
# unauthenticated users. When set to True, unauthenticated users can
# access the API with read-only privileges. This however only applies
# when using ContextMiddleware.
#
# Possible values:
# * True
# * False
#
# Related options:
# * None
#
# (boolean value)
#allow_anonymous_access = false

#
# Limit the request ID length.
#
# Provide an integer value to limit the length of the request ID to
# the specified length. The default value is 64. Users can change this
# to any integer value between 0 and 16384; however, keep in mind that
# a larger value may flood the logs.
# # Possible values: # * Integer value between 0 and 16384 # # Related options: # * None # # (integer value) # Minimum value: 0 #max_request_id_length = 64 # # Public url endpoint to use for Glance versions response. # # This is the public url endpoint that will appear in the Glance # "versions" response. If no value is specified, the endpoint that is # displayed in the version's response is that of the host running the # API service. Change the endpoint to represent the proxy URL if the # API service is running behind a proxy. If the service is running # behind a load balancer, add the load balancer's URL for this value. # # Possible values: # * None # * Proxy URL # * Load balancer URL # # Related options: # * None # # (string value) #public_endpoint = # # Secure hashing algorithm used for computing the 'os_hash_value' property. # # This option configures the Glance "multihash", which consists of two # image properties: the 'os_hash_algo' and the 'os_hash_value'. The # 'os_hash_algo' will be populated by the value of this configuration # option, and the 'os_hash_value' will be populated by the hexdigest computed # when the algorithm is applied to the uploaded or imported image data. # # The value must be a valid secure hash algorithm name recognized by the # python 'hashlib' library. You can determine what these are by examining # the 'hashlib.algorithms_available' data member of the version of the # library being used in your Glance installation. For interoperability # purposes, however, we recommend that you use the set of secure hash # names supplied by the 'hashlib.algorithms_guaranteed' data member because # those algorithms are guaranteed to be supported by the 'hashlib' library # on all platforms. Thus, any image consumer using 'hashlib' locally should # be able to verify the 'os_hash_value' of the image. # # The default value of 'sha512' is a performant secure hash algorithm. # # If this option is misconfigured, any attempts to store image data will fail. # For that reason, we recommend using the default value. # # Possible values: # * Any secure hash algorithm name recognized by the Python 'hashlib' # library # # Related options: # * None # # (string value) #hashing_algorithm = sha512 # # Maximum number of image members per image. # # This limits the maximum of users an image can be shared with. Any negative # value is interpreted as unlimited. # # Related options: # * None # # (integer value) #image_member_quota = 128 # # Maximum number of properties allowed on an image. # # This enforces an upper limit on the number of additional properties an image # can have. Any negative value is interpreted as unlimited. # # (integer value) #image_property_quota = 128 # # Maximum number of tags allowed on an image. # # Any negative value is interpreted as unlimited. # # Related options: # * None # # (integer value) #image_tag_quota = 128 # # Maximum number of locations allowed on an image. # # Any negative value is interpreted as unlimited. # # Related options: # * None # # (integer value) #image_location_quota = 10 # # The default number of results to return for a request. # # Responses to certain API requests, like list images, may return # multiple items. The number of results returned can be explicitly # controlled by specifying the ``limit`` parameter in the API request. # However, if a ``limit`` parameter is not specified, this # configuration value will be used as the default number of results to # be returned for any API request. 
# # NOTES: # * The value of this configuration option may not be greater than # the value specified by ``api_limit_max``. # * Setting this to a very large value may slow down database # queries and increase response times. Setting this to a # very low value may result in poor user experience. # # Possible values: # * Any positive integer # # Related options: # * api_limit_max # # (integer value) # Minimum value: 1 #limit_param_default = 25 # # Maximum number of results that could be returned by a request. # # As described in the help text of ``limit_param_default``, some # requests may return multiple results. The number of results to be # returned are governed either by the ``limit`` parameter in the # request or the ``limit_param_default`` configuration option. # The value in either case, can't be greater than the absolute maximum # defined by this configuration option. Anything greater than this # value is trimmed down to the maximum value defined here. # # NOTE: Setting this to a very large value may slow down database # queries and increase response times. Setting this to a # very low value may result in poor user experience. # # Possible values: # * Any positive integer # # Related options: # * limit_param_default # # (integer value) # Minimum value: 1 #api_limit_max = 1000 # # Show direct image location when returning an image. # # This configuration option indicates whether to show the direct image # location when returning image details to the user. The direct image # location is where the image data is stored in backend storage. This # image location is shown under the image property ``direct_url``. # # When multiple image locations exist for an image, the best location # is displayed based on the store weightage assigned for each store # indicated by the configuration option ``weight``. # # NOTES: # * Revealing image locations can present a GRAVE SECURITY RISK as # image locations can sometimes include credentials. Hence, this # is set to ``False`` by default. Set this to ``True`` with # EXTREME CAUTION and ONLY IF you know what you are doing! # * If an operator wishes to avoid showing any image location(s) # to the user, then both this option and # ``show_multiple_locations`` MUST be set to ``False``. # # Possible values: # * True # * False # # Related options: # * show_multiple_locations # * weight # # (boolean value) #show_image_direct_url = false # DEPRECATED: # Show all image locations when returning an image. # # This configuration option indicates whether to show all the image # locations when returning image details to the user. When multiple # image locations exist for an image, the locations are ordered based # on the store weightage assigned for each store indicated by the # configuration option ``weight``. The image locations are shown # under the image property ``locations``. # # NOTES: # * Revealing image locations can present a GRAVE SECURITY RISK as # image locations can sometimes include credentials. Hence, this # is set to ``False`` by default. Set this to ``True`` with # EXTREME CAUTION and ONLY IF you know what you are doing! # * See https://wiki.openstack.org/wiki/OSSN/OSSN-0065 for more # information. # * If an operator wishes to avoid showing any image location(s) # to the user, then both this option and # ``show_image_direct_url`` MUST be set to ``False``. # # Possible values: # * True # * False # # Related options: # * show_image_direct_url # * weight # # (boolean value) # This option is deprecated for removal since Newton. 
# Its value may be silently ignored in the future. # Reason: Use of this option, deprecated since Newton, is a security risk and # will be removed once we figure out a way to satisfy those use cases that # currently require it. An earlier announcement that the same functionality can # be achieved with greater granularity by using policies is incorrect. You # cannot work around this option via policy configuration at the present time, # though that is the direction we believe the fix will take. Please keep an eye # on the Glance release notes to stay up to date on progress in addressing this # issue. #show_multiple_locations = false # # Calculate hash and checksum for the image. # # This configuration option indicates whether the POST # /v2/images/{image_id}/locations API will calculate the hash and checksum of # the image on the fly. If False, the hash and checksum calculation is # silently skipped. # # Possible values: # * True # * False # (boolean value) #do_secure_hash = true # # The number of times to retry when any operation fails. # (integer value) #http_retries = 3 # # Maximum size of image a user can upload in bytes. # # An image upload greater than the size mentioned here would result # in an image creation failure. This configuration option defaults to # 1099511627776 bytes (1 TiB). # # NOTES: # * This value should only be increased after careful # consideration and must be set less than or equal to # 8 EiB (9223372036854775808). # * This value must be set with careful consideration of the # backend storage capacity. Setting this to a very low value # may result in a large number of image failures, while setting # it to a very large value may result in faster consumption # of storage. Hence, this must be set according to the nature of # images created and storage capacity available. # # Possible values: # * Any positive number less than or equal to 9223372036854775808 # # (integer value) # Minimum value: 1 # Maximum value: 9223372036854775808 #image_size_cap = 1099511627776 # # Maximum amount of image storage per tenant. # # This enforces an upper limit on the cumulative storage consumed by all images # of a tenant across all stores. This is a per-tenant limit. # # The default unit for this configuration option is Bytes. However, storage # units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, # ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and # TeraBytes respectively. Note that there should not be any space between the # value and unit. Value ``0`` signifies no quota enforcement. Negative values # are invalid and result in errors. # # This has no effect if ``use_keystone_limits`` is enabled. # # Possible values: # * A string that is a valid concatenation of a non-negative integer # representing the storage value and an optional string literal # representing storage units as mentioned above. # # Related options: # * use_keystone_limits # # (string value) #user_storage_quota = 0 # # Utilize per-tenant resource limits registered in Keystone. # # Enabling this feature will cause Glance to retrieve limits set in keystone # for resource consumption and enforce them against API users. Before turning # this on, the limits need to be registered in Keystone, or all quotas will be # considered to be zero and all new resource requests will be rejected. # # These per-tenant resource limits are independent from the static # global ones configured in this config file. If this is enabled, the # relevant static global limits will be ignored. 
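# # For example (hypothetical commands and values; the resource names shown are assumed to be Glance's Keystone quota names), limits might be registered before enabling this option with: # ``openstack registered limit create --service glance --default-limit 100 image_count_total`` # ``openstack registered limit create --service glance --default-limit 10000 image_size_total`` 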
# (boolean value) #use_keystone_limits = false # # Host address of the pydev server. # # Provide a string value representing the hostname or IP of the # pydev server to use for debugging. The pydev server listens for # debug connections on this address, facilitating remote debugging # in Glance. # # Possible values: # * Valid hostname # * Valid IP address # # Related options: # * None # # (host address value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #pydev_worker_debug_host = localhost # # Port number that the pydev server will listen on. # # Provide a port number to bind the pydev server to. The pydev # process accepts debug connections on this port and facilitates # remote debugging in Glance. # # Possible values: # * A valid port number # # Related options: # * None # # (port value) # Minimum value: 0 # Maximum value: 65535 #pydev_worker_debug_port = 5678 # DEPRECATED: # AES key for encrypting store location metadata. # # Provide a string value representing the AES key to use for # encrypting Glance store metadata. # # NOTE: The AES key to use must be set to a random string of length # 16, 24 or 32 bytes. # # Possible values: # * String value representing a valid AES key # # Related options: # * None # # (string value) # This option is deprecated for removal since Dalmatian. # Its value may be silently ignored in the future. # Reason: # This option does not serve the purpose of encrypting location metadata; # it encrypts the location URL only for specific APIs. Also, enabling it # during an upgrade may disrupt existing deployments, as no db upgrade script # is provided to encrypt existing location URLs. Moreover, its functionality # for encrypting location URLs is inconsistent, resulting in download # failures. #metadata_encryption_key = # DEPRECATED: # Digest algorithm to use for digital signature. # # Provide a string value representing the digest algorithm to # use for generating digital signatures. By default, ``sha256`` # is used. # # To get a list of the available algorithms supported by the version # of OpenSSL on your platform, run the command: # ``openssl list-message-digest-algorithms``. # Examples are 'sha1', 'sha256', and 'sha512'. # # NOTE: ``digest_algorithm`` is not related to Glance's image signing # and verification. It is only used to sign the universally unique # identifier (UUID) as a part of the certificate file and key file # validation. # # Possible values: # * An OpenSSL message digest algorithm identifier # # Related options: # * None # # (string value) # This option is deprecated for removal since Dalmatian. # Its value may be silently ignored in the future. # Reason: # This option has had no effect since the removal of native SSL support. #digest_algorithm = sha256 # # The URL that provides the location where the temporary data will be stored. # # This option is for Glance internal use only. Glance will save the # image data uploaded by the user to 'staging' endpoint during the # image import process. # # This option does not change the 'staging' API endpoint by any means. # # NOTE: Using the same path as [task]/work_dir is discouraged. # # NOTE: 'file://' is the only scheme the api_image_import flow # will support for now. # # NOTE: The staging path must be on a shared filesystem available to all # Glance API nodes. 
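# # For example (hypothetical path), a deployment with a shared NFS mount at /var/lib/glance/staging might set ``node_staging_uri = file:///var/lib/glance/staging``. 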
# # Possible values: # * String starting with 'file://' followed by absolute FS path # # Related options: # * [task]/work_dir # # (string value) #node_staging_uri = file:///tmp/staging/ # # List of enabled Image Import Methods # # 'glance-direct', 'copy-image' and 'web-download' are enabled by default. # 'glance-download' is available, but requires federated deployments. # # Related options: # * [DEFAULT]/node_staging_uri (list value) #enabled_import_methods = [glance-direct,web-download,copy-image] # # The URL to this worker. # # If this is set, other glance workers will know how to contact this one # directly if needed. For image import, a single worker stages the image # and other workers need to be able to proxy the import request to the # right one. # # If unset, this will be considered to be `public_endpoint`, which # normally would be set to the same value on all workers, effectively # disabling the proxying behavior. # # Possible values: # * A URL by which this worker is reachable from other workers # # Related options: # * public_endpoint # # (string value) #worker_self_reference_url = # # The location of the property protection file. # # Provide a valid path to the property protection file which contains # the rules for property protections and the roles/policies associated # with them. # # A property protection file, when set, restricts the Glance image # properties to be created, read, updated and/or deleted by a specific # set of users that are identified by either roles or policies. # If this configuration option is not set, by default, property # protections won't be enforced. If a value is specified and the file # is not found, the glance-api service will fail to start. # More information on property protections can be found at: # https://docs.openstack.org/glance/latest/admin/property-protections.html # # Possible values: # * Empty string # * Valid path to the property protection configuration file # # Related options: # * property_protection_rule_format # # (string value) #property_protection_file = # # Rule format for property protection. # # Provide the desired way to set property protection on Glance # image properties. The two permissible values are ``roles`` # and ``policies``. The default value is ``roles``. # # If the value is ``roles``, the property protection file must # contain a comma separated list of user roles indicating # permissions for each of the CRUD operations on each property # being protected. If set to ``policies``, a policy defined in # policy.yaml is used to express property protections for each # of the CRUD operations. Examples of how property protections # are enforced based on ``roles`` or ``policies`` can be found at: # https://docs.openstack.org/glance/latest/admin/property- # protections.html#examples # # Possible values: # * roles # * policies # # Related options: # * property_protection_file # # (string value) # Possible values: # roles - # policies - #property_protection_rule_format = roles # # IP address to bind the glance servers to. # # Provide an IP address to bind the glance server to. The default # value is ``0.0.0.0``. # # Edit this option to enable the server to listen on one particular # IP address on the network card. This facilitates selection of a # particular network interface for the server. # # Possible values: # * A valid IPv4 address # * A valid IPv6 address # # Related options: # * None # # (host address value) #bind_host = 0.0.0.0 # # Port number on which the server will listen. 
# # Provide a valid port number to bind the server's socket to. This # port is then set to identify processes and forward network messages # that arrive at the server. The default bind_port value for the API # server is 9292. # # Possible values: # * A valid port number (0 to 65535) # # Related options: # * None # # (port value) # Minimum value: 0 # Maximum value: 65535 #bind_port = # # Number of Glance worker processes to start. # # Provide a non-negative integer value to set the number of child # process workers to service requests. By default, the number of CPUs # available is used as the value for ``workers``, limited to 8. For # example, if the processor count is 6, 6 workers will be used; if the # processor count is 24, only 8 workers will be used. The limit applies # only to the default value; if 24 workers are explicitly configured, 24 # will be used. # # Each worker process listens on the port set in the # configuration file and contains a greenthread pool of size 1000. # # NOTE: Setting the number of workers to zero triggers the creation # of a single API process with a greenthread pool of size 1000. # # Possible values: # * 0 # * Positive integer value (typically equal to the number of CPUs) # # Related options: # * None # # (integer value) # Minimum value: 0 #workers = # # Maximum line size of message headers. # # Provide an integer value representing a length to limit the size of # message headers. The default value is 16384. # # NOTE: ``max_header_line`` may need to be increased when using large # tokens (typically those generated by the Keystone v3 API with big # service catalogs). However, keep in mind that larger values for # ``max_header_line`` may flood the logs. # # Setting ``max_header_line`` to 0 sets no limit for the line size of # message headers. # # Possible values: # * 0 # * Positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #max_header_line = 16384 # # Set keep alive option for HTTP over TCP. # # Provide a boolean value to determine sending of keep alive packets. # If set to ``False``, the server returns the header # "Connection: close". If set to ``True``, the server returns a # "Connection: Keep-Alive" in its responses. This enables retention of # the same TCP connection for HTTP conversations instead of opening a # new one with each new request. # # This option must be set to ``False`` if the client socket connection # needs to be closed explicitly after the response is received and # read successfully by the client. # # Possible values: # * True # * False # # Related options: # * None # # (boolean value) #http_keepalive = true # # Timeout for client connections' socket operations. # # Provide a valid integer value representing time in seconds to set # the period of wait before an incoming connection can be closed. The # default value is 900 seconds. # # The value zero implies wait forever. # # Possible values: # * Zero # * Positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #client_socket_timeout = 900 # # Set the number of incoming connection requests. # # Provide a positive integer value to limit the number of requests in # the backlog queue. The default queue size is 4096. # # An incoming connection to a TCP listener socket is queued before a # connection can be established with the server. Setting the backlog # for a TCP socket ensures a limited queue size for incoming traffic. 
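# # For example (hypothetical tuning), a deployment expecting large connection bursts might raise this to ``backlog = 8192``; note that the effective backlog is also capped by the kernel (e.g. ``net.core.somaxconn`` on Linux). 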
# # Possible values: # * Positive integer # # Related options: # * None # # (integer value) # Minimum value: 1 #backlog = 4096 # # Set the wait time before a connection recheck. # # Provide a positive integer value representing time in seconds which # is set as the idle wait time before a TCP keep alive packet can be # sent to the host. The default value is 600 seconds. # # Setting ``tcp_keepidle`` helps verify at regular intervals that a # connection is intact and prevents frequent TCP connection # reestablishment. # # Possible values: # * Positive integer value representing time in seconds # # Related options: # * None # # (integer value) # Minimum value: 1 #tcp_keepidle = 600 # Key:Value pairs of store identifier and store type. Multiple backends # should be separated with commas. (dict value) #enabled_backends = # This argument is used internally on Windows. Glance passes a pipe handle to # child processes, which is then used for inter-process communication. (string # value) #pipe_handle = # DEPRECATED: # The relative path to sqlite file database that will be used for image cache # management. # # This is a relative path to the sqlite file database that tracks the age and # usage statistics of image cache. The path is relative to image cache base # directory, specified by the configuration option ``image_cache_dir``. # # This is a lightweight database with just one table. # # Possible values: # * A valid relative path to sqlite file database # # Related options: # * ``image_cache_dir`` # # (string value) # This option is deprecated for removal since Caracal (2024.1). # Its value may be silently ignored in the future. # Reason: # As a centralized database will now be used for image cache management, the # use of the `sqlite` database and driver will be dropped in the 'E' (2025.1) # development cycle. #image_cache_sqlite_db = cache.db # # The driver to use for image cache management. # # This configuration option provides the flexibility to choose between the # different image-cache drivers available. An image-cache driver is responsible # for providing the essential functions of the image cache, such as writing # images to and reading images from the cache, tracking the age and usage of # cached images, providing a list of cached images, fetching the size of the # cache, queueing images for caching, and cleaning up the cache. # # The essential functions of a driver are defined in the base class # ``glance.image_cache.drivers.base.Driver``. All image-cache drivers (existing # and prospective) must implement this interface. Currently available drivers # are ``centralized_db``, ``sqlite`` and ``xattr``. These drivers primarily # differ in the way they store the information about cached images: # # * The ``centralized_db`` driver uses a central database (which will be common # for all glance nodes) to track the usage of cached images. # * The ``sqlite`` (deprecated) driver uses a sqlite database (which sits on # every glance node locally) to track the usage of cached images. # * The ``xattr`` driver uses the extended attributes of files to store this # information. It also requires a filesystem that sets ``atime`` on the files # when accessed. # # Deprecation warning: # * As a centralized database will now be used for image cache management, the # use of the `sqlite` database and driver will be dropped in the 'E' (2025.1) # development cycle. 
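# # For example (hypothetical setting), a node that has not yet migrated its cache management to the centralized database might temporarily set ``image_cache_driver = sqlite`` until the migration is complete. 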
# # Possible values: # * centralized_db # * sqlite # * xattr # # Related options: # * None # # (string value) # Possible values: # centralized_db - # sqlite - # xattr - #image_cache_driver = centralized_db # # The upper limit on cache size, in bytes, after which the cache-pruner cleans # up the image cache. # # NOTE: This is just a threshold for cache-pruner to act upon. It is NOT a # hard limit beyond which the image cache would never grow. In fact, depending # on how often the cache-pruner runs and how quickly the cache fills, the image # cache can far exceed the size specified here very easily. Hence, care must be # taken to appropriately schedule the cache-pruner and to set this limit # accordingly. # # Glance caches an image when it is downloaded. Consequently, the size of the # image cache grows over time as the number of downloads increases. To keep the # cache size from becoming unmanageable, it is recommended to run the # cache-pruner as a periodic task. When the cache pruner is kicked off, it # compares the current size of image cache and triggers a cleanup if the image # cache grew beyond the size specified here. After the cleanup, the size of # cache is less than or equal to size specified here. # # Possible values: # * Any non-negative integer # # Related options: # * None # # (integer value) # Minimum value: 0 #image_cache_max_size = 10737418240 # # The amount of time, in seconds, an incomplete image remains in the cache. # # Incomplete images are images for which download is in progress. Please see the # description of configuration option ``image_cache_dir`` for more detail. # Sometimes, due to various reasons, it is possible the download may hang and # the incompletely downloaded image remains in the ``incomplete`` directory. # This configuration option sets a time limit on how long the incomplete images # should remain in the ``incomplete`` directory before they are cleaned up. # Once an incomplete image spends more time than is specified here, it'll be # removed by cache-cleaner on its next run. # # It is recommended to run cache-cleaner as a periodic task on the Glance API # nodes to keep the incomplete images from occupying disk space. # # Possible values: # * Any non-negative integer # # Related options: # * None # # (integer value) # Minimum value: 0 #image_cache_stall_time = 86400 # # Base directory for image cache. # # This is the location where image data is cached and served out of. All cached # images are stored directly under this directory. This directory also contains # three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``. # # The ``incomplete`` subdirectory is the staging area for downloading images. An # image is first downloaded to this directory. When the image download is # successful it is moved to the base directory. However, if the download fails, # the partially downloaded image file is moved to the ``invalid`` subdirectory. # # The ``queue`` subdirectory is used for queuing images for download. This is # used primarily by the cache-prefetcher, which can be scheduled as a periodic # task like cache-pruner and cache-cleaner, to cache images ahead of their # usage. # Upon receiving the request to cache an image, Glance touches a file in the # ``queue`` directory with the image id as the file name. The cache-prefetcher, # when running, polls for the files in ``queue`` directory and starts # downloading them in the order they were created. When the download is # successful, the zero-sized file is deleted from the ``queue`` directory. 
# If the download fails, the zero-sized file remains and it'll be retried the # next time cache-prefetcher runs. # # Possible values: # * A valid path # # Related options: # * ``image_cache_sqlite_db`` # # (string value) #image_cache_dir = # # Default publisher_id for outgoing Glance notifications. # # This is the value that the notification driver will use to identify # messages for events originating from the Glance service. Typically, # this is the hostname of the instance that generated the message. # # Possible values: # * Any reasonable instance identifier, for example: image.host1 # # Related options: # * None # # (string value) #default_publisher_id = image.localhost # # List of notifications to be disabled. # # Specify a list of notifications that should not be emitted. # A notification can be given either as a notification type to # disable a single event notification, or as a notification group # prefix to disable all event notifications within a group. # # Possible values: # A comma-separated list of individual notification types or # notification groups to be disabled. Currently supported groups: # # * image # * image.member # * task # * metadef_namespace # * metadef_object # * metadef_property # * metadef_resource_type # * metadef_tag # # For a complete listing and description of each event refer to: # https://docs.openstack.org/glance/latest/admin/notifications.html # # The values must be specified as: <group_name>.<event_name>. # For example: image.create,task.success,metadef_tag # # Related options: # * None # # (list value) #disabled_notifications = # DEPRECATED: # The amount of time, in seconds, to delay image scrubbing. # # When delayed delete is turned on, an image is put into ``pending_delete`` # state upon deletion until the scrubber deletes its image data. Typically, soon # after the image is put into ``pending_delete`` state, it is available for # scrubbing. However, scrubbing can be delayed until a later point using this # configuration option. This option denotes the time period an image spends in # ``pending_delete`` state before it is available for scrubbing. # # It is important to realize that this has storage implications. The larger the # ``scrub_time``, the longer the time to reclaim backend storage from deleted # images. # # Possible values: # * Any non-negative integer # # Related options: # * ``delayed_delete`` # # (integer value) # Minimum value: 0 # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #scrub_time = 0 # DEPRECATED: # The size of thread pool to be used for scrubbing images. # # When there are a large number of images to scrub, it is beneficial to scrub # images in parallel so that the scrub queue stays under control and the backend # storage is reclaimed in a timely fashion. This configuration option denotes # the maximum number of images to be scrubbed in parallel. The default value is # one, which signifies serial scrubbing. Any value above one indicates parallel # scrubbing. # # Possible values: # * Any non-zero positive integer # # Related options: # * ``delayed_delete`` # # (integer value) # Minimum value: 1 # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. 
#scrub_pool_size = 1 # DEPRECATED: # Turn on/off delayed delete. # # Typically when an image is deleted, the ``glance-api`` service puts the image # into ``deleted`` state and deletes its data at the same time. Delayed delete # is a feature in Glance that delays the actual deletion of image data until a # later point in time (as determined by the configuration option # ``scrub_time``). # When delayed delete is turned on, the ``glance-api`` service puts the image # into ``pending_delete`` state upon deletion and leaves the image data in the # storage backend for the image scrubber to delete at a later time. The image # scrubber will move the image into ``deleted`` state upon successful deletion # of image data. # # NOTE: When delayed delete is turned on, image scrubber MUST be running as a # periodic task to prevent the backend storage from filling up with undesired # usage. # # Possible values: # * True # * False # # Related options: # * ``scrub_time`` # * ``wakeup_time`` # * ``scrub_pool_size`` # # (boolean value) # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #delayed_delete = false # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. #debug = false # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging # configuration files are used then all logging configuration is set in the # configuration file and other logging configuration options are ignored (for # example, log-date-format). (string value) # Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. (string # value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default is set, # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # DEPRECATED: Uses a logging handler designed to watch the file system. When a # log file is moved or removed, this handler will open a new log file with the # specified path instantaneously. It makes sense only if the log_file option is # specified and the Linux platform is used. This option is ignored if # log_config_append is set. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This function is known to have been broken for a long time, and # depends on an unmaintained library #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and will be # changed later to honor RFC5424. This option is ignored if log_config_append is # set. (boolean value) #use_syslog = false # Enable journald for logging. 
If running in a systemd environment, you may wish # to enable journal support. Doing so will use the journal native protocol which # includes structured metadata in addition to log messages. This option is # ignored if log_config_append is set. (boolean value) #use_journal = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Use JSON formatting for logging. This option is ignored if log_config_append # is set. (boolean value) #use_json = false # Log output to standard error. This option is ignored if log_config_append is # set. (boolean value) #use_stderr = false # DEPRECATED: Log output to Windows Event Log. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Windows support is no longer maintained. #use_eventlog = false # (Optional) Set the 'color' key according to log levels. This option takes # effect only when logging to stderr or stdout is used. This option is ignored # if log_config_append is set. (boolean value) #log_color = false # The amount of time before the log files are rotated. This option is ignored # unless log_rotation_type is set to "interval". (integer value) #log_rotate_interval = 1 # Rotation interval type. The time of the last file change (or the time when the # service was started) is used when scheduling the next rotation. (string value) # Possible values: # Seconds - # Minutes - # Hours - # Days - # Weekday - # Midnight - #log_rotate_interval_type = days # Maximum number of rotated log files. (integer value) #max_logfile_count = 30 # Log file maximum size in MB. This option is ignored if "log_rotation_type" is # not set to "size". (integer value) #max_logfile_size_mb = 200 # Log rotation type. (string value) # Possible values: # interval - Rotate logs at predefined time intervals. # size - Rotate logs once they reach a predefined size. # none - Do not rotate log files. #log_rotation_type = none # Format string to use for log messages with context. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the message is # DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. Used by oslo_log.formatters.ContextFormatter # (string value) #logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. 
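# For example (hypothetical override), appending ``glance=DEBUG`` to this list enables debug logging for Glance alone while keeping the third-party libraries listed in the default value quiet. 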
(list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. (string # value) #instance_uuid_format = "[instance: %(uuid)s] " # Interval, number of seconds, of log rate limiting. (integer value) #rate_limit_interval = 0 # Maximum number of logged messages per rate_limit_interval. (integer value) #rate_limit_burst = 0 # Log level name used by rate limiting. Logs with level greater or equal to # rate_limit_except_level are not filtered. An empty string means that all # levels are filtered. (string value) # Possible values: # CRITICAL - # ERROR - # INFO - # WARNING - # DEBUG - # '' - #rate_limit_except_level = CRITICAL # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false # # From oslo.messaging # # Size of RPC connection pool. (integer value) # Minimum value: 1 #rpc_conn_pool_size = 30 # The pool size limit for connections expiration policy (integer value) #conn_pool_min_size = 2 # The time-to-live in sec of idle connections in the pool (integer value) #conn_pool_ttl = 1200 # Size of executor thread pool when executor is threading or eventlet. (integer # value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call. (integer value) #rpc_response_timeout = 60 # The network address and optional user credentials for connecting to the # messaging backend, in URL format. The expected format is: # # driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query # # Example: rabbit://rabbitmq:password@127.0.0.1:5672// # # For full details on the fields in the URL see the documentation of # oslo_messaging.TransportURL at # https://docs.openstack.org/oslo.messaging/latest/reference/transport.html # (string value) #transport_url = rabbit:// # The default exchange under which topics are scoped. May be overridden by an # exchange name specified in the transport_url option. (string value) #control_exchange = openstack # Add an endpoint to answer to ping calls. Endpoint is named # oslo_rpc_server_ping (boolean value) #rpc_ping_enabled = false [barbican] # # From castellan.config # # Use this endpoint to connect to Barbican, for example: # "http://localhost:9311/" (string value) #barbican_endpoint = # Version of the Barbican API, for example: "v1" (string value) #barbican_api_version = # Use this endpoint to connect to Keystone (string value) # Deprecated group/name - [key_manager]/auth_url #auth_endpoint = http://localhost/identity/v3 # Number of seconds to wait before retrying poll for key creation completion # (integer value) #retry_delay = 1 # Number of times to retry poll for key creation completion (integer value) #number_of_retries = 60 # Specifies whether to verify TLS (https) requests. 
If False, the server's certificate # will not be validated; if True, the verify_ssl_path option may also be set. # (boolean value) #verify_ssl = true # A path to a bundle or CA certs to check against, or None for requests to # attempt to locate and use certificates when verify_ssl is True. If verify_ssl # is False, this is ignored. (string value) #verify_ssl_path = # Specifies the type of endpoint. (string value) # Possible values: # public - # internal - # admin - #barbican_endpoint_type = public # Specifies the region of the chosen endpoint. (string value) #barbican_region_name = # # When True, if sending a user token to a REST API, also send a service token. # # Nova often reuses the user token provided to the nova-api to talk to other # REST # APIs, such as Cinder, Glance and Neutron. It is possible that while the user # token was valid when the request was made to Nova, the token may expire before # it reaches the other service. To avoid any failures, and to make it clear it # is # Nova calling the service on the user's behalf, we include a service token # along # with the user token. Should the user's token have expired, a valid service # token ensures the REST API request will still be accepted by the keystone # middleware. # (boolean value) #send_service_user_token = false [barbican_service_user] # # From castellan.config # # PEM encoded Certificate Authority to use when verifying HTTPs connections. # (string value) #cafile = # PEM encoded client certificate cert file (string value) #certfile = # PEM encoded client certificate key file (string value) #keyfile = # Verify HTTPS connections. (boolean value) #insecure = false # Timeout value for http requests (integer value) #timeout = # Collect per-API call timing information. (boolean value) #collect_timing = false # Log requests to multiple loggers. (boolean value) #split_loggers = false # Authentication type to load (string value) # Deprecated group/name - [barbican_service_user]/auth_plugin #auth_type = # Config Section from which to load plugin specific options (string value) #auth_section = [cinder] # # From glance.multi_store # # # Information to match when looking for cinder in the service catalog. # # When the ``cinder_endpoint_template`` is not set and any of # ``cinder_store_auth_address``, ``cinder_store_user_name``, # ``cinder_store_project_name``, ``cinder_store_password`` is not set, # cinder store uses this information to look up the cinder endpoint from the # service catalog in the current context. ``cinder_os_region_name``, if set, is # taken into consideration to fetch the appropriate endpoint. # # The service catalog can be listed by the ``openstack catalog list`` command. # # Possible values: # * A string of the following form: # ``<service_type>:<service_name>:<interface>`` # At least ``service_type`` and ``interface`` should be specified. # ``service_name`` can be omitted. # # Related options: # * cinder_os_region_name # * cinder_endpoint_template # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_password # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_catalog_info = volumev3::publicURL # # Override service catalog lookup with template for cinder endpoint. # # When this option is set, this value is used to generate cinder endpoint, # instead of looking up from the service catalog. # This value is ignored if ``cinder_store_auth_address``, # ``cinder_store_user_name``, ``cinder_store_project_name``, and # ``cinder_store_password`` are specified. 
# # If this configuration option is set, ``cinder_catalog_info`` will be ignored. # # Possible values: # * URL template string for cinder endpoint, where ``%%(tenant)s`` is # replaced with the current tenant (project) name. # For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` # # Related options: # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_password # * cinder_store_project_domain_name # * cinder_store_user_domain_name # * cinder_catalog_info # # (string value) #cinder_endpoint_template = # # Region name to look up the cinder service from the service catalog. # # This is used only when ``cinder_catalog_info`` is used for determining the # endpoint. If set, the lookup for the cinder endpoint by this node is filtered # to the specified region. It is useful when multiple regions are listed in the # catalog. If this is not set, the endpoint is looked up from every region. # # Possible values: # * A string that is a valid region name. # # Related options: # * cinder_catalog_info # # (string value) # Deprecated group/name - [cinder]/os_region_name #cinder_os_region_name = # # Location of a CA certificates file used for cinder client requests. # # The specified CA certificates file, if set, is used to verify cinder # connections via HTTPS endpoint. If the endpoint is HTTP, this value is # ignored. # ``cinder_api_insecure`` must be set to ``False`` for the verification to take # effect. # # Possible values: # * Path to a ca certificates file # # Related options: # * cinder_api_insecure # # (string value) #cinder_ca_certificates_file = # # Number of cinderclient retries on failed http calls. # # When a call fails with any error, cinderclient will retry the call up to the # specified number of times, sleeping a few seconds between attempts. # # Possible values: # * A positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #cinder_http_retries = 3 # # Time period, in seconds, to wait for a cinder volume transition to # complete. # # When the cinder volume is created, deleted, or attached to the glance node to # read/write the volume data, the volume's state is changed. For example, the # newly created volume status changes from ``creating`` to ``available`` after # the creation process is completed. This specifies the maximum time to wait for # the status change. If a timeout occurs while waiting, or the status is changed # to an unexpected value (e.g. ``error``), the image creation fails. # # Possible values: # * A positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #cinder_state_transition_timeout = 300 # # Allow insecure SSL requests to cinder. # # If this option is set to True, SSL verification of the cinder HTTPS endpoint # is disabled. If set to False, the connection is verified using the CA # certificates file specified by the ``cinder_ca_certificates_file`` option. # # Possible values: # * True # * False # # Related options: # * cinder_ca_certificates_file # # (boolean value) #cinder_api_insecure = false # # The address where the cinder authentication service is listening. # # When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, # ``cinder_store_project_name``, and ``cinder_store_password`` options are # specified, the specified values are always used for the authentication. # This is useful to hide the image volumes from users by storing them in a # project/tenant specific to the image service. It also enables users to share # the image volume among other projects under the control of glance's ACL. 
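# # For example (hypothetical values), a dedicated service project for image volumes might be configured as: # ``cinder_store_auth_address = http://keystone.example.org/identity/v3`` # ``cinder_store_user_name = glance`` # ``cinder_store_password = <password>`` # ``cinder_store_project_name = glance-volumes`` 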
# # If any of these options is not set, the cinder endpoint is looked up # from the service catalog, and the current context's user and project are # used. # # Possible values: # * A valid authentication service address, for example: # ``http://openstack.example.org/identity/v2.0`` # # Related options: # * cinder_store_user_name # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_auth_address = # # User name to authenticate against cinder. # # This must be used with all the following non-domain-related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid user name # # Related options: # * cinder_store_auth_address # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_user_name = # # Domain of the user to authenticate against cinder. # # Possible values: # * A valid domain name for the user specified by ``cinder_store_user_name`` # # Related options: # * cinder_store_auth_address # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_name # # (string value) #cinder_store_user_domain_name = Default # # Password for the user authenticating against cinder. # # This must be used with all the following related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid password for the user specified by ``cinder_store_user_name`` # # Related options: # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_password = # # Project name where the image volume is stored in cinder. # # If this configuration option is not set, the project in the current context # is used. # # This must be used with all the following related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid project name # # Related options: # * ``cinder_store_auth_address`` # * ``cinder_store_user_name`` # * ``cinder_store_password`` # * ``cinder_store_project_domain_name`` # * ``cinder_store_user_domain_name`` # # (string value) #cinder_store_project_name = # # Domain of the project where the image volume is stored in cinder. # # Possible values: # * A valid domain name of the project specified by # ``cinder_store_project_name`` # # Related options: # * ``cinder_store_auth_address`` # * ``cinder_store_user_name`` # * ``cinder_store_password`` # * ``cinder_store_project_domain_name`` # * ``cinder_store_user_domain_name`` # # (string value) #cinder_store_project_domain_name = Default # # Path to the rootwrap configuration file to use for running commands as root. # # The cinder store requires root privileges to operate the image volumes (for # connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). # The configuration file should allow the commands required by the cinder store # and the os-brick library. # # Possible values: # * Path to the rootwrap config file # # Related options: # * None # # (string value) #rootwrap_config = /etc/glance/rootwrap.conf # # Volume type that will be used for volume creation in cinder. 
# # Some cinder backends can have several volume types to optimize storage usage. # Adding this option allows an operator to choose a specific volume type # in cinder that can be optimized for images. # # If this is not set, then the default volume type specified in the cinder # configuration will be used for volume creation. # # Possible values: # * A valid volume type from cinder # # Related options: # * None # # NOTE: You cannot use an encrypted volume_type associated with an NFS backend. # An encrypted volume stored on an NFS backend will raise an exception whenever # glance_store tries to write or access image data stored in that volume. # Consult your Cinder administrator to determine an appropriate volume_type. # # (string value) #cinder_volume_type = # # If this is set to True, attachment of volumes for image transfer will # be aborted when multipathd is not running. Otherwise, it will fall back # to a single path. # # Possible values: # * True or False # # Related options: # * cinder_use_multipath # # (boolean value) #cinder_enforce_multipath = false # # Flag to identify whether multipath is supported in the deployment. # # Set it to False if multipath is not supported. # # Possible values: # * True or False # # Related options: # * cinder_enforce_multipath # # (boolean value) #cinder_use_multipath = false # # Directory where the NFS volume is mounted on the glance node. # # Possible values: # # * A string representing the absolute path of the mount point. # (string value) #cinder_mount_point_base = /var/lib/glance/mnt # # If this is set to True, glance will perform an extend operation # on the attached volume. Only enable this option if the cinder # backend driver supports the functionality of extending online # (in-use) volumes. Supported from cinder microversion 3.42 and # onwards. By default, it is set to False. # # Possible values: # * True or False # # (boolean value) #cinder_do_extend_attached = false [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain received in the # request's "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing # slash. Example: https://horizon.example.com (list value) #allowed_origin = # Indicate that the actual request can include user credentials (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to HTTP Simple # Headers. (list value) #expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual request. (list # value) #allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID [database] # # From oslo.db # # If True, SQLite uses synchronous mode. (boolean value) #sqlite_synchronous = true # The back end to use for the database. (string value) #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. (string # value) #connection = # The SQLAlchemy connection string to use to connect to the slave database. # (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including the # default, overrides any server-set SQL mode. 
To use whatever SQL mode is set by # the server configuration, set this to no value. Example: mysql_sql_mode= # (string value) #mysql_sql_mode = TRADITIONAL # For Galera only, configure wsrep_sync_wait causality checks on new # connections. Default is None, meaning don't configure any setting. (integer # value) #mysql_wsrep_sync_wait = # Connections which have been present in the connection pool longer than this # number of seconds will be replaced with a new one the next time they are # checked out from the pool. (integer value) #connection_recycle_time = 3600 # Maximum number of SQL connections to keep open in a pool. Setting a value of 0 # indicates no limit. (integer value) #max_pool_size = 5 # Maximum number of database connection retries during startup. Set to -1 to # specify an infinite retry count. (integer value) #max_retries = 10 # Interval between retries of opening a SQL connection. (integer value) #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer value) #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer # value) # Minimum value: 0 # Maximum value: 100 #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer value) #pool_timeout = # Enable the experimental use of database reconnect on connection lost. (boolean # value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database operation up to # db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries of a # database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before error is # raised. Set to -1 to specify an infinite retry count. (integer value) #db_max_retries = 20 # Optional URL parameters to append onto the connection URL at connect time; # specify as param1=value1&param2=value2&... (string value) #connection_parameters = [file] # # From glance.multi_store # # # Directory to which the filesystem backend store writes images. # # Upon start up, Glance creates the directory if it doesn't already # exist and verifies write access to the user under which # ``glance-api`` runs. If the write access isn't available, a # ``BadStoreConfiguration`` exception is raised and the filesystem # store may not be available for adding new images. # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``. If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * A valid path to a directory # # Related options: # * ``filesystem_store_datadirs`` # * ``filesystem_store_file_perm`` # # (string value) #filesystem_store_datadir = /var/lib/glance/images # # List of directories and their priorities to which the filesystem # backend store writes images. # # The filesystem store can be configured to store images in multiple # directories as opposed to using a single directory specified by the # ``filesystem_store_datadir`` configuration option. 
When using # multiple directories, each directory can be given an optional # priority to specify the preference order in which they should # be used. Priority is an integer that is concatenated to the # directory path with a colon where a higher value indicates higher # priority. When two directories have the same priority, the directory # with most free space is used. When no priority is specified, it # defaults to zero. # # More information on configuring filesystem store with multiple store # directories can be found at # https://docs.openstack.org/glance/latest/configuration/configuring.html # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``. If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * List of strings of the following form: # * ``<a valid directory path>:<optional priority>`` # # Related options: # * ``filesystem_store_datadir`` # * ``filesystem_store_file_perm`` # # (multi valued) #filesystem_store_datadirs = # # Filesystem store metadata file. # # The path to a file which contains the metadata to be returned with any # location # associated with the filesystem store. Once this option is set, it is used for # new images created afterward only - previously existing images are not # affected. # # The file must contain a valid JSON object. The object should contain the keys # ``id`` and ``mountpoint``. The value for both keys should be a string. # # Possible values: # * A valid path to the store metadata file # # Related options: # * None # # (string value) #filesystem_store_metadata_file = # # File access permissions for the image files. # # Set the intended file access permissions for image data. This provides # a way to enable other services, e.g. Nova, to consume images directly # from the filesystem store. The users running the services that are # intended to be given access could be made members of the group that # owns the created files. Assigning a value less than or equal to # zero for this configuration option signifies that no changes be made # to the default permissions. This value will be decoded as an octal # digit. # # For more information, please refer to the documentation at # https://docs.openstack.org/glance/latest/configuration/configuring.html # # Possible values: # * A valid file access permission # * Zero # * Any negative integer # # Related options: # * None # # (integer value) #filesystem_store_file_perm = 0 # # Chunk size, in bytes. # # The chunk size used when reading or writing image files. Raising this value # may improve the throughput but it may also slightly increase the memory usage # when handling a large number of requests. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #filesystem_store_chunk_size = 65536 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null # byte sequences to the filesystem; the resulting holes will automatically be # interpreted by the filesystem as null bytes, and will not consume your # storage. # Enabling this feature will also speed up image upload and save network # traffic, in addition to saving space in the backend, as null byte sequences # are not sent over the network. 
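# # For example (hypothetical setting), on a backing filesystem that supports sparse files (such as XFS or ext4) an operator might set ``filesystem_thin_provisioning = true`` so that zero-filled regions of uploaded images do not allocate space. 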
# # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #filesystem_thin_provisioning = false [glance.store.http.store] # # From glance.multi_store # # # Path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the remote server certificate. If # this option is set, the ``https_insecure`` option will be ignored and # the CA file specified will be used to authenticate the server # certificate and establish a secure connection to the server. # # Possible values: # * A valid path to a CA file # # Related options: # * https_insecure # # (string value) #https_ca_certificates_file = # # Set verification of the remote server certificate. # # This configuration option takes in a boolean value to determine # whether or not to verify the remote server certificate. If set to # True, the remote server certificate is not verified. If the option is # set to False, then the default CA truststore is used for verification. # # This option is ignored if ``https_ca_certificates_file`` is set. # The remote server certificate will then be verified using the file # specified by the ``https_ca_certificates_file`` option. # # Possible values: # * True # * False # # Related options: # * https_ca_certificates_file # # (boolean value) #https_insecure = true # # The http/https proxy information to be used to connect to the remote # server. # # This configuration option specifies the http/https proxy information # that should be used to connect to the remote server. The proxy # information should be a key value pair of the scheme and proxy, for # example, http:10.0.0.1:3128. You can also specify proxies for multiple # schemes by separating the key value pairs with a comma, for example, # http:10.0.0.1:3128, https:10.0.0.1:1080. # # Possible values: # * A comma separated list of scheme:proxy pairs as described above # # Related options: # * None # # (dict value) #http_proxy_information = [glance.store.rbd.store] # # From glance.multi_store # # # Size, in megabytes, to chunk RADOS images into. # # Provide an integer value representing the size in megabytes to chunk # Glance images into. The default chunk size is 8 megabytes. For optimal # performance, the value should be a power of two. # # When Ceph's RBD object storage system is used as the storage backend # for storing Glance images, the images are chunked into objects of the # size set using this option. These chunked objects are then stored # across the distributed block data store for use by Glance. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #rbd_store_chunk_size = 8 # # RADOS pool in which images are stored. # # When RBD is used as the storage backend for storing Glance images, the # images are stored by means of logical grouping of the objects (chunks # of images) into a ``pool``. Each pool is defined with the number of # placement groups it can contain. The default pool that is used is # 'images'. # # More information on the RBD storage backend can be found here: # http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ # # Possible Values: # * A valid pool name # # Related options: # * None # # (string value) #rbd_store_pool = images # # RADOS user to authenticate as. # # This configuration option takes in the RADOS user to authenticate as. # This is only needed when RADOS authentication is enabled and is # applicable only if the user is using Cephx authentication. 
If the # value for this option is not set by the user or is set to None, a # default value will be chosen, which will be based on the client.<USER> # section in rbd_store_ceph_conf. # # Possible Values: # * A valid RADOS user # # Related options: # * rbd_store_ceph_conf # # (string value) #rbd_store_user = # # Ceph configuration file path. # # This configuration option specifies the path to the Ceph configuration # file to be used. If the value for this option is not set by the user # or is set to the empty string, librados will read the standard ceph.conf # file by searching the default Ceph configuration file locations in # sequential order. See the Ceph documentation for details. # # NOTE: If using Cephx authentication, this file should include a reference # to the right keyring in a client.<USER> section # # NOTE 2: If you leave this option empty (the default), the actual Ceph # configuration file used may change depending on what version of librados # is being used. If it is important for you to know exactly which configuration # file is in effect, you may specify that file here using this option. # # Possible Values: # * A valid path to a configuration file # # Related options: # * rbd_store_user # # (string value) #rbd_store_ceph_conf = # # Timeout value for connecting to Ceph cluster. # # This configuration option takes in the timeout value in seconds used # when connecting to the Ceph cluster i.e. it sets the time to wait for # glance-api before closing the connection. This prevents glance-api # hangups during the connection to RBD. If the value for this option # is set to less than 0, no timeout is set and the default librados value # is used. # # Possible Values: # * Any integer value # # Related options: # * None # # (integer value) #rados_connect_timeout = -1 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null # byte sequences on the RBD backend; the holes that may appear will # automatically be interpreted by Ceph as null bytes and will not consume # your storage. # Enabling this feature will also speed up image upload and save network traffic # in addition to saving space in the backend, as null byte sequences are not # sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #rbd_thin_provisioning = false [glance.store.s3.store] # # From glance.multi_store # # # The host where the S3 server is listening. # # This configuration option sets the host of the S3 or S3 compatible storage # server. This option is required when using the S3 storage backend. # The host can contain a DNS name (e.g. s3.amazonaws.com, my-object-storage.com) # or an IP address (127.0.0.1). # # Possible values: # * A valid DNS name # * A valid IPv4 address # # Related Options: # * s3_store_access_key # * s3_store_secret_key # # (string value) #s3_store_host = # # The S3 region name. # # This parameter will set the region_name used by boto. # If this parameter is not set, we will try to compute it from the # s3_store_host. # # Possible values: # * A valid region name # # Related Options: # * s3_store_host # # (string value) #s3_store_region_name = # # The S3 query token access key. # # This configuration option takes the access key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend.
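# For illustration, a minimal S3 configuration might look like the following # (the host, credentials, and bucket are hypothetical placeholders, not # defaults): # #s3_store_host = s3.example.org #s3_store_access_key = <access key> #s3_store_secret_key = <secret key> #s3_store_bucket = glance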
# # Possible values: # * Any string value that is the access key for a user with appropriate # privileges # # Related Options: # * s3_store_host # * s3_store_secret_key # # (string value) #s3_store_access_key = # # The S3 query token secret key. # # This configuration option takes the secret key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is a secret key corresponding to the access key # specified using the ``s3_store_access_key`` option # # Related Options: # * s3_store_host # * s3_store_access_key # # (string value) #s3_store_secret_key = # # The S3 bucket to be used to store the Glance data. # # This configuration option specifies where the glance images will be stored # in S3. If ``s3_store_create_bucket_on_put`` is set to true, the bucket will # be created automatically if it does not exist. # # Possible values: # * Any string value # # Related Options: # * s3_store_create_bucket_on_put # * s3_store_bucket_url_format # # (string value) #s3_store_bucket = # # Determine whether S3 should create a new bucket. # # This configuration option takes a boolean value to indicate whether Glance # should create a new bucket in S3 if it does not exist. # # Possible values: # * Any Boolean value # # Related Options: # * None # # (boolean value) #s3_store_create_bucket_on_put = false # # The S3 calling format used to determine the object. # # This configuration option takes the access model that is used to specify the # address of an object in an S3 bucket. # # NOTE: # In ``path``-style, the endpoint for the object looks like # 'https://s3.amazonaws.com/bucket/example.img'. # And in ``virtual``-style, the endpoint for the object looks like # 'https://bucket.s3.amazonaws.com/example.img'. # If you do not follow the DNS naming convention in the bucket name, you can # get objects in the path style, but not in the virtual style. # # Possible values: # * Any string value of ``auto``, ``virtual``, or ``path`` # # Related Options: # * s3_store_bucket # # (string value) #s3_store_bucket_url_format = auto # # The size, in MB, at which to start chunking image files and doing a # multipart upload in S3. # # This configuration option takes a threshold in MB to determine whether to # upload the image to S3 as is or to split it (Multipart Upload). # # Note: An image can be split into at most 10,000 parts. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_chunk_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_size = 100 # # The multipart upload part size, in MB, to use when uploading parts to S3. # # This configuration option takes the image split size in MB for Multipart # Upload. # # Note: An image can be split into at most 10,000 parts. # # Possible values: # * Any positive integer value (must be greater than or equal to 5 MB) # # Related Options: # * s3_store_large_object_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_chunk_size = 10 # # The number of thread pools to perform a multipart upload in S3. # # This configuration option takes the number of thread pools when performing a # Multipart Upload. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_size # * s3_store_large_object_chunk_size # # (integer value) #s3_store_thread_pools = 10 [glance.store.swift.store] # # From glance.multi_store # # # Set verification of the server certificate.
# # This boolean determines whether or not to verify the server # certificate. If this option is set to True, swiftclient won't check # for a valid SSL certificate when authenticating. If the option is set # to False, then the default CA truststore is used for verification. # # Possible values: # * True # * False # # Related options: # * swift_store_cacert # # (boolean value) #swift_store_auth_insecure = false # # Path to the CA bundle file. # # This configuration option enables the operator to specify the path to # a custom Certificate Authority file for SSL verification when # connecting to Swift. # # Possible values: # * A valid path to a CA file # # Related options: # * swift_store_auth_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_cacert = /etc/ssl/certs/ca-certificates.crt # # The region of Swift endpoint to use by Glance. # # Provide a string value representing a Swift region where Glance # can connect to for image storage. By default, there is no region # set. # # When Glance uses Swift as the storage backend to store images # for a specific tenant that has multiple endpoints, setting of a # Swift region with ``swift_store_region`` allows Glance to connect # to Swift in the specified region as opposed to a single region # connectivity. # # This option can be configured for both single-tenant and # multi-tenant storage. # # NOTE: Setting the region with ``swift_store_region`` is # tenant-specific and is necessary ``only if`` the tenant has # multiple endpoints across different regions. # # Possible values: # * A string value representing a valid Swift region. # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_region = RegionTwo # # The URL endpoint to use for Swift backend storage. # # Provide a string value representing the URL endpoint to use for # storing Glance images in Swift store. By default, an endpoint # is not set and the storage URL returned by ``auth`` is used. # Setting an endpoint with ``swift_store_endpoint`` overrides the # storage URL and is used for Glance image storage. # # NOTE: The URL should include the path up to, but excluding the # container. The location of an object is obtained by appending # the container and object to the configured URL. # # Possible values: # * String value representing a valid URL path up to a Swift container # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name # # Endpoint Type of Swift service. # # This string value indicates the endpoint type to use to fetch the # Swift endpoint. The endpoint type determines the actions the user will # be allowed to perform, for instance, reading and writing to the Store. # This setting is only used if swift_store_auth_version is greater than # 1. # # Possible values: # * publicURL # * adminURL # * internalURL # # Related options: # * swift_store_endpoint # # (string value) # Possible values: # publicURL - # adminURL - # internalURL - #swift_store_endpoint_type = publicURL # # Type of Swift service to use. # # Provide a string value representing the service type to use for # storing images while using Swift backend storage. 
The default # service type is set to ``object-store``. # # NOTE: If ``swift_store_auth_version`` is set to 2, the value for # this configuration option needs to be ``object-store``. If using # a higher version of Keystone or a different auth scheme, this # option may be modified. # # Possible values: # * A string representing a valid service type for Swift storage. # # Related Options: # * None # # (string value) #swift_store_service_type = object-store # # Name of single container to store images/name prefix for multiple containers # # When a single container is being used to store images, this configuration # option indicates the container within the Glance account to be used for # storing all images. When multiple containers are used to store images, this # will be the name prefix for all containers. Usage of single/multiple # containers can be controlled using the configuration option # ``swift_store_multiple_containers_seed``. # # When using multiple containers, the containers will be named after the value # set for this configuration option with the first N chars of the image UUID # as the suffix delimited by an underscore (where N is specified by # ``swift_store_multiple_containers_seed``). # # Example: if the seed is set to 3 and swift_store_container = ``glance``, then # an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in # the container ``glance_fda``. All dashes in the UUID are included when # creating the container name but do not count toward the character limit, so # when N=10 the container name would be ``glance_fdae39a1-ba``. # # Possible values: # * If using single container, this configuration option can be any string # that is a valid swift container name in Glance's Swift account # * If using multiple containers, this configuration option can be any # string as long as it satisfies the container naming rules enforced by # Swift. The value of ``swift_store_multiple_containers_seed`` should be # taken into account as well. # # Related options: # * ``swift_store_multiple_containers_seed`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (string value) #swift_store_container = glance # # The size threshold, in MB, after which Glance will start segmenting image # data. # # Swift has an upper limit on the size of a single uploaded object. By default, # this is 5GB. To upload objects bigger than this limit, objects are segmented # into multiple smaller objects that are tied together with a manifest file. # For more detail, refer to # https://docs.openstack.org/swift/latest/overview_large_objects.html # # This configuration option specifies the size threshold over which the Swift # driver will start segmenting image data into multiple smaller files. # Currently, the Swift driver only supports creating Dynamic Large Objects. # # NOTE: This should be set by taking into account the large object limit # enforced by the Swift cluster under consideration. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster under consideration. # # Related options: # * ``swift_store_large_object_chunk_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_size = 5120 # # The maximum size, in MB, of the segments when image data is segmented.
# # When image data is segmented to upload images that are larger than the limit # enforced by the Swift cluster, image data is broken into segments that are no # bigger than the size specified by this configuration option. # Refer to ``swift_store_large_object_size`` for more detail. # # For example: if ``swift_store_large_object_size`` is 5GB and # ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be # segmented into 7 segments where the first six segments will be 1GB in size and # the seventh segment will be 0.2GB. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster under consideration. # # Related options: # * ``swift_store_large_object_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_chunk_size = 200 # # Create container, if it doesn't already exist, when uploading image. # # At the time of uploading an image, if the corresponding container doesn't # exist, it will be created provided this configuration option is set to True. # By default, it won't be created. This behavior is applicable for both single # and multiple containers mode. # # Possible values: # * True # * False # # Related options: # * None # # (boolean value) #swift_store_create_container_on_put = false # # Store images in tenant's Swift account. # # This enables multi-tenant storage mode which causes Glance images to be stored # in tenant specific Swift accounts. If this is disabled, Glance stores all # images in its own account. More details about the multi-tenant store can be # found at # https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage # # NOTE: If using multi-tenant swift store, please make sure # that you do not set a swift configuration file with the # 'swift_store_config_file' option. # # Possible values: # * True # * False # # Related options: # * swift_store_config_file # # (boolean value) #swift_store_multi_tenant = false # # Seed indicating the number of containers to use for storing images. # # When using a single-tenant store, images can be stored in one or more # containers. When set to 0, all images will be stored in one single container. # When set to an integer value between 1 and 32, multiple containers will be # used to store images. This configuration option will determine how many # containers are created. The total number of containers that will be used is # equal to 16^N, so if this config option is set to 2, then 16^2=256 containers # will be used to store images. # # Please refer to ``swift_store_container`` for more detail on the naming # convention. More detail about using multiple containers can be found at # https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- # multiple-containers.html # # NOTE: This is used only when swift_store_multi_tenant is disabled. # # Possible values: # * A non-negative integer less than or equal to 32 # # Related options: # * ``swift_store_container`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (integer value) # Minimum value: 0 # Maximum value: 32 #swift_store_multiple_containers_seed = 0 # # List of tenants that will be granted admin access. # # This is a list of tenants that will be granted read/write access on # all Swift containers created by Glance in multi-tenant mode. The # default value is an empty list.
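# For illustration, granting admin access to two projects might look like the # following (the placeholders stand for real Keystone project UUIDs): # #swift_store_admin_tenants = <project-uuid-1>,<project-uuid-2>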
# # Possible values: # * A comma separated list of strings representing UUIDs of Keystone # projects/tenants # # Related options: # * None # # (list value) #swift_store_admin_tenants = # # SSL layer compression for HTTPS Swift requests. # # Provide a boolean value to determine whether or not to compress # HTTPS Swift requests for images at the SSL layer. By default, # compression is enabled. # # When using Swift as the backend store for Glance image storage, # SSL layer compression of HTTPS Swift requests can be set using # this option. If set to False, SSL layer compression of HTTPS # Swift requests is disabled. Disabling this option may improve # performance for images which are already in a compressed format, # for example, qcow2. # # Possible values: # * True # * False # # Related Options: # * None # # (boolean value) #swift_store_ssl_compression = true # # The number of times a Swift download will be retried before the # request fails. # # Provide an integer value representing the number of times an image # download must be retried before erroring out. The default value is # zero (no retry on a failed image download). When set to a positive # integer value, ``swift_store_retry_get_count`` ensures that the # download is attempted this many more times upon a download failure # before sending an error message. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_retry_get_count = 0 # # Time in seconds defining the size of the window in which a new # token may be requested before the current token is due to expire. # # Typically, the Swift storage driver fetches a new token upon the # expiration of the current token to ensure continued access to # Swift. However, some Swift transactions (like uploading image # segments) may not recover well if the token expires on the fly. # # Hence, by fetching a new token before the current token expiration, # we make sure that the token does not expire or is close to expiry # before a transaction is attempted. By default, the Swift storage # driver requests a new token 60 seconds or less before the # current token expiration. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_expire_soon_interval = 60 # # Use trusts for multi-tenant Swift store. # # This option instructs the Swift store to create a trust for each # add/get request when the multi-tenant store is in use. Using trusts # allows the Swift store to avoid problems that can be caused by an # authentication token expiring during the upload or download of data. # # By default, ``swift_store_use_trusts`` is set to ``True`` (use of # trusts is enabled). If set to ``False``, a user token is used for # the Swift connection instead, eliminating the overhead of trust # creation. # # NOTE: This option is considered only when # ``swift_store_multi_tenant`` is set to ``True`` # # Possible values: # * True # * False # # Related options: # * swift_store_multi_tenant # # (boolean value) #swift_store_use_trusts = true # # Buffer image segments before upload to Swift. # # Provide a boolean value to indicate whether or not Glance should # buffer image data to disk while uploading to swift. This enables # Glance to resume uploads on error. # # NOTES: # When enabling this option, one should take great care as this # increases disk usage on the API node.
Be aware that depending # upon how the file system is configured, the disk space used # for buffering may decrease the actual disk space available for # the glance image cache. Disk utilization will cap according to # the following equation: # (``swift_store_large_object_chunk_size`` * ``workers`` * 1000) # # Possible values: # * True # * False # # Related options: # * swift_upload_buffer_dir # # (boolean value) #swift_buffer_on_upload = false # # Reference to default Swift account/backing store parameters. # # Provide a string value representing a reference to the default set # of parameters required for using swift account/backing store for # image storage. The default reference value for this configuration # option is 'ref1'. This configuration option dereferences the # parameters and facilitates image storage in Swift storage backend # every time a new image is added. # # Possible values: # * A valid string value # # Related options: # * None # # (string value) #default_swift_reference = ref1 # DEPRECATED: Version of the authentication service to use. Valid versions are 2 # and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'auth_version' in the Swift back-end configuration file is # used instead. #swift_store_auth_version = 2 # DEPRECATED: The address where the Swift authentication service is listening. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'auth_address' in the Swift back-end configuration file is # used instead. #swift_store_auth_address = # DEPRECATED: The user to authenticate against the Swift authentication service. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'user' in the Swift back-end configuration file is set instead. #swift_store_user = # DEPRECATED: Auth key for the user authenticating against the Swift # authentication service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'key' in the Swift back-end configuration file is used # to set the authentication key instead. #swift_store_key = # # Absolute path to the file containing the swift account(s) # configurations. # # Include a string value representing the path to a configuration # file that has references for each of the configured Swift # account(s)/backing stores. By default, no file path is specified # and customized Swift referencing is disabled. Configuring this # option is highly recommended while using Swift storage backend for # image storage as it avoids storage of credentials in the database. # # NOTE: Please do not configure this option if you have set # ``swift_store_multi_tenant`` to ``True``. # # Possible values: # * String value representing an absolute path on the glance-api # node # # Related options: # * swift_store_multi_tenant # # (string value) #swift_store_config_file = # # Directory to buffer image segments before upload to Swift. # # Provide a string value representing the absolute path to the # directory on the glance node where image segments will be # buffered briefly before they are uploaded to swift. # # NOTES: # * This is required only when the configuration option # ``swift_buffer_on_upload`` is set to True. 
# * This directory should be provisioned keeping in mind the # ``swift_store_large_object_chunk_size`` and the maximum # number of images that could be uploaded simultaneously by # a given glance node. # # Possible values: # * String value representing an absolute directory path # # Related options: # * swift_buffer_on_upload # * swift_store_large_object_chunk_size # # (string value) #swift_upload_buffer_dir = [glance.store.vmware_datastore.store] # # From glance.multi_store # # # Address of the ESX/ESXi or vCenter Server target system. # # This configuration option sets the address of the ESX/ESXi or vCenter # Server target system. This option is required when using the VMware # storage backend. The address can contain an IP address (127.0.0.1) or # a DNS name (www.my-domain.com). # # Possible Values: # * A valid IPv4 or IPv6 address # * A valid DNS name # # Related options: # * vmware_server_username # * vmware_server_password # # (host address value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_host = 127.0.0.1 # # Server username. # # This configuration option takes the username for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is the username for a user with appropriate # privileges # # Related options: # * vmware_server_host # * vmware_server_password # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_username = root # # Server password. # # This configuration option takes the password for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is a password corresponding to the username # specified using the "vmware_server_username" option # # Related options: # * vmware_server_host # * vmware_server_username # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_password = vmware # # The number of VMware API retries. # # This configuration option specifies the number of times the VMware # ESX/VC server API must be retried upon connection related issues or # server API call overload. It is not possible to specify 'retry # forever'. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_api_retry_count = 10 # # Interval in seconds used for polling remote tasks invoked on VMware # ESX/VC server. # # This configuration option takes in the sleep time in seconds for polling an # on-going async task as part of the VMWare ESX/VC server API call. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_task_poll_interval = 5 # # The directory where the glance images will be stored in the datastore. # # This configuration option specifies the path to the directory where the # glance images will be stored in the VMware datastore. If this option # is not set, the default directory where the glance images are stored # is openstack_glance. 
# # Possible Values: # * Any string that is a valid path to a directory # # Related options: # * None # # (string value) #vmware_store_image_dir = /openstack_glance # # Set verification of the ESX/vCenter server certificate. # # This configuration option takes a boolean value to determine # whether or not to verify the ESX/vCenter server certificate. If this # option is set to True, the ESX/vCenter server certificate is not # verified. If this option is set to False, then the default CA # truststore is used for verification. # # This option is ignored if the "vmware_ca_file" option is set. In that # case, the ESX/vCenter server certificate will then be verified using # the file specified using the "vmware_ca_file" option. # # Possible Values: # * True # * False # # Related options: # * vmware_ca_file # # (boolean value) # Deprecated group/name - [glance.store.vmware_datastore.store]/vmware_api_insecure #vmware_insecure = false # # Absolute path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the ESX/vCenter certificate. # # If this option is set, the "vmware_insecure" option will be ignored # and the CA file specified will be used to authenticate the ESX/vCenter # server certificate and establish a secure connection to the server. # # Possible Values: # * Any string that is a valid absolute path to a CA file # # Related options: # * vmware_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_ca_file = /etc/ssl/certs/ca-certificates.crt # # The datastores where the image can be stored. # # This configuration option specifies the datastores where the image can # be stored in the VMWare store backend. This option may be specified # multiple times for specifying multiple datastores. The datastore name # should be specified after its datacenter path, separated by ":". An # optional weight may be given after the datastore name, separated again # by ":" to specify the priority. Thus, the required format becomes # <datacenter_path>:<datastore_name>:<optional_weight>. # # When adding an image, the datastore with the highest weight will be # selected, unless there is not enough free space available in cases # where the image size is already known. If no weight is given, it is # assumed to be zero and the directory will be considered for selection # last. If multiple datastores have the same weight, then the one with # the most free space available is selected. # # Possible Values: # * Any string of the format: # <datacenter_path>:<datastore_name>:<optional_weight> # # Related options: # * None # # (multi valued) #vmware_datastores = [glance_store] # # From glance.multi_store # # # The store identifier for the default backend in which data will be # stored. # # The value must be defined as one of the keys in the dict defined # by the ``enabled_backends`` configuration option in the DEFAULT # configuration group. # # If a value is not defined for this option: # # * the consuming service may refuse to start # * store_add calls that do not specify a specific backend will # raise a ``glance_store.exceptions.UnknownScheme`` exception # # Related Options: # * enabled_backends # # (string value) #default_backend = # # From glance.store # # DEPRECATED: # List of enabled Glance stores. # # Register the storage backends to use for storing disk images # as a comma separated list. The default stores enabled for # storing disk images with Glance are ``file`` and ``http``.
# # Possible values: # * A comma separated list that could include: # * file # * http # * swift # * rbd # * cinder # * vmware # * s3 # # Related Options: # * default_store # # (list value) # This option is deprecated for removal since Rocky. # Its value may be silently ignored in the future. # Reason: # This option is deprecated in favor of the new config option # ``enabled_backends`` which helps to configure multiple backend stores # of different schemes. # # This option is scheduled for removal in the U development # cycle. #stores = file,http # DEPRECATED: # The default scheme to use for storing images. # # Provide a string value representing the default scheme to use for # storing images. If not set, Glance uses ``file`` as the default # scheme to store images with the ``file`` store. # # NOTE: The value given for this configuration option must be a valid # scheme for a store registered with the ``stores`` configuration # option. # # Possible values: # * file # * filesystem # * http # * https # * swift # * swift+http # * swift+https # * swift+config # * rbd # * cinder # * vsphere # * s3 # # Related Options: # * stores # # (string value) # Possible values: # file - # filesystem - # http - # https - # swift - # swift+http - # swift+https - # swift+config - # rbd - # cinder - # vsphere - # s3 - # This option is deprecated for removal since Rocky. # Its value may be silently ignored in the future. # Reason: # This option is deprecated in favor of the new config option # ``default_backend`` which acts similarly to the ``default_store`` config # option. # # This option is scheduled for removal in the U development # cycle. #default_store = file # # Information to match when looking for cinder in the service catalog. # # When the ``cinder_endpoint_template`` is not set and any of # ``cinder_store_auth_address``, ``cinder_store_user_name``, # ``cinder_store_project_name``, ``cinder_store_password`` is not set, # cinder store uses this information to look up the cinder endpoint from the # service catalog in the current context. ``cinder_os_region_name``, if set, # is taken into consideration to fetch the appropriate endpoint. # # The service catalog can be listed by the ``openstack catalog list`` command. # # Possible values: # * A string of the following form: # ``<service_type>:<service_name>:<interface>`` # At least ``service_type`` and ``interface`` should be specified. # ``service_name`` can be omitted. # # Related options: # * cinder_os_region_name # * cinder_endpoint_template # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_password # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_catalog_info = volumev3::publicURL # # Override service catalog lookup with template for cinder endpoint. # # When this option is set, this value is used to generate the cinder endpoint, # instead of looking it up from the service catalog. # This value is ignored if ``cinder_store_auth_address``, # ``cinder_store_user_name``, ``cinder_store_project_name``, and # ``cinder_store_password`` are specified. # # If this configuration option is set, ``cinder_catalog_info`` will be ignored. # # Possible values: # * URL template string for cinder endpoint, where ``%%(tenant)s`` is # replaced with the current tenant (project) name.
# For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` # # Related options: # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_password # * cinder_store_project_domain_name # * cinder_store_user_domain_name # * cinder_catalog_info # # (string value) #cinder_endpoint_template = # # Region name to look up the cinder service from the service catalog. # # This is used only when ``cinder_catalog_info`` is used for determining the # endpoint. If set, the lookup for the cinder endpoint by this node is filtered # to the specified region. It is useful when multiple regions are listed in the # catalog. If this is not set, the endpoint is looked up from every region. # # Possible values: # * A string that is a valid region name. # # Related options: # * cinder_catalog_info # # (string value) # Deprecated group/name - [glance_store]/os_region_name #cinder_os_region_name = # # Location of a CA certificates file used for cinder client requests. # # The specified CA certificates file, if set, is used to verify cinder # connections via HTTPS endpoint. If the endpoint is HTTP, this value is # ignored. # ``cinder_api_insecure`` must be set to ``False`` for the verification to # take place. # # Possible values: # * Path to a ca certificates file # # Related options: # * cinder_api_insecure # # (string value) #cinder_ca_certificates_file = # # Number of cinderclient retries on failed http calls. # # When a call fails with any error, cinderclient will retry the call up to the # specified number of times after sleeping for a few seconds. # # Possible values: # * A positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #cinder_http_retries = 3 # # Time period, in seconds, to wait for a cinder volume transition to # complete. # # When the cinder volume is created, deleted, or attached to the glance node to # read/write the volume data, the volume's state is changed. For example, the # newly created volume status changes from ``creating`` to ``available`` after # the creation process is completed. This specifies the maximum time to wait for # the status change. If a timeout occurs while waiting, or the status is changed # to an unexpected value (e.g. ``error``), the image creation fails. # # Possible values: # * A positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #cinder_state_transition_timeout = 300 # # Allow insecure SSL requests to cinder. # # If this option is set to True, the HTTPS endpoint connection is not verified. # If set to False, the connection is verified, using the CA certificates file # specified by the ``cinder_ca_certificates_file`` option when it is set. # # Possible values: # * True # * False # # Related options: # * cinder_ca_certificates_file # # (boolean value) #cinder_api_insecure = false # # The address where the cinder authentication service is listening. # # When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, # ``cinder_store_project_name``, and ``cinder_store_password`` options are # specified, the specified values are always used for the authentication. # This is useful to hide the image volumes from users by storing them in a # project/tenant specific to the image service. It also enables users to share # the image volume among other projects under the control of glance's ACL. # # If any of these options is not set, the cinder endpoint is looked up # from the service catalog, and the current context's user and project are used.
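# For illustration, a hypothetical dedicated service project for image volumes # could be configured as follows (all values are placeholders, not defaults): # #cinder_store_auth_address = http://keystone.example.org/identity/v3 #cinder_store_user_name = glance #cinder_store_password = <password> #cinder_store_project_name = glance-image-store #cinder_store_user_domain_name = Default #cinder_store_project_domain_name = Default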
# # Possible values: # * A valid authentication service address, for example: # ``http://openstack.example.org/identity/v2.0`` # # Related options: # * cinder_store_user_name # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_auth_address = # # User name to authenticate against cinder. # # This must be used with all the following non-domain-related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid user name # # Related options: # * cinder_store_auth_address # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_user_name = # # Domain of the user to authenticate against cinder. # # Possible values: # * A valid domain name for the user specified by ``cinder_store_user_name`` # # Related options: # * cinder_store_auth_address # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_name # # (string value) #cinder_store_user_domain_name = Default # # Password for the user authenticating against cinder. # # This must be used with all the following related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid password for the user specified by ``cinder_store_user_name`` # # Related options: # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_password = # # Project name where the image volume is stored in cinder. # # If this configuration option is not set, the project in current context is # used. # # This must be used with all the following related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid project name # # Related options: # * ``cinder_store_auth_address`` # * ``cinder_store_user_name`` # * ``cinder_store_password`` # * ``cinder_store_project_domain_name`` # * ``cinder_store_user_domain_name`` # # (string value) #cinder_store_project_name = # # Domain of the project where the image volume is stored in cinder. # # Possible values: # * A valid domain name of the project specified by # ``cinder_store_project_name`` # # Related options: # * ``cinder_store_auth_address`` # * ``cinder_store_user_name`` # * ``cinder_store_password`` # * ``cinder_store_project_domain_name`` # * ``cinder_store_user_domain_name`` # # (string value) #cinder_store_project_domain_name = Default # # Path to the rootwrap configuration file to use for running commands as root. # # The cinder store requires root privileges to operate the image volumes (for # connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). # The configuration file should allow the required commands by cinder store and # os-brick library. # # Possible values: # * Path to the rootwrap config file # # Related options: # * None # # (string value) #rootwrap_config = /etc/glance/rootwrap.conf # # Volume type that will be used for volume creation in cinder. # # Some cinder backends can have several volume types to optimize storage usage. 
# Adding this option allows an operator to choose a specific volume type # in cinder that can be optimized for images. # # If this is not set, then the default volume type specified in the cinder # configuration will be used for volume creation. # # Possible values: # * A valid volume type from cinder # # Related options: # * None # # NOTE: You cannot use an encrypted volume_type associated with an NFS backend. # An encrypted volume stored on an NFS backend will raise an exception whenever # glance_store tries to write or access image data stored in that volume. # Consult your Cinder administrator to determine an appropriate volume_type. # # (string value) #cinder_volume_type = # # If this is set to True, attachment of volumes for image transfer will # be aborted when multipathd is not running. Otherwise, it will fallback # to single path. # # Possible values: # * True or False # # Related options: # * cinder_use_multipath # # (boolean value) #cinder_enforce_multipath = false # # Flag to identify multipath is supported or not in the deployment. # # Set it to False if multipath is not supported. # # Possible values: # * True or False # # Related options: # * cinder_enforce_multipath # # (boolean value) #cinder_use_multipath = false # # Directory where the NFS volume is mounted on the glance node. # # Possible values: # # * A string representing absolute path of mount point. # (string value) #cinder_mount_point_base = /var/lib/glance/mnt # # If this is set to True, glance will perform an extend operation # on the attached volume. Only enable this option if the cinder # backend driver supports the functionality of extending online # (in-use) volumes. Supported from cinder microversion 3.42 and # onwards. By default, it is set to False. # # Possible values: # * True or False # # (boolean value) #cinder_do_extend_attached = false # # Directory to which the filesystem backend store writes images. # # Upon start up, Glance creates the directory if it doesn't already # exist and verifies write access to the user under which # ``glance-api`` runs. If the write access isn't available, a # ``BadStoreConfiguration`` exception is raised and the filesystem # store may not be available for adding new images. # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``. If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * A valid path to a directory # # Related options: # * ``filesystem_store_datadirs`` # * ``filesystem_store_file_perm`` # # (string value) #filesystem_store_datadir = /var/lib/glance/images # # List of directories and their priorities to which the filesystem # backend store writes images. # # The filesystem store can be configured to store images in multiple # directories as opposed to using a single directory specified by the # ``filesystem_store_datadir`` configuration option. When using # multiple directories, each directory can be given an optional # priority to specify the preference order in which they should # be used. Priority is an integer that is concatenated to the # directory path with a colon where a higher value indicates higher # priority. When two directories have the same priority, the directory # with most free space is used. When no priority is specified, it # defaults to zero. 
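# For example, a hypothetical two-directory layout that prefers the first # directory and falls back to the second (the paths are illustrative only): # #filesystem_store_datadirs = /mnt/fast/glance/images:200 #filesystem_store_datadirs = /mnt/bulk/glance/images:100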
# # More information on configuring filesystem store with multiple store # directories can be found at # https://docs.openstack.org/glance/latest/configuration/configuring.html # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``. If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * List of strings of the following form: # * ``<a valid directory path>:<optional integer priority>`` # # Related options: # * ``filesystem_store_datadir`` # * ``filesystem_store_file_perm`` # # (multi valued) #filesystem_store_datadirs = # # Filesystem store metadata file. # # The path to a file which contains the metadata to be returned with any # location # associated with the filesystem store. Once this option is set, it is used for # new images created afterward only - previously existing images are not # affected. # # The file must contain a valid JSON object. The object should contain the keys # ``id`` and ``mountpoint``. The value for both keys should be a string. # # Possible values: # * A valid path to the store metadata file # # Related options: # * None # # (string value) #filesystem_store_metadata_file = # # File access permissions for the image files. # # Set the intended file access permissions for image data. This provides # a way to enable other services, e.g. Nova, to consume images directly # from the filesystem store. The users running the services that are # intended to be given access can be made members of the group that # owns the created files. Assigning a value less than or equal to # zero for this configuration option signifies that no changes will be made # to the default permissions. This value will be interpreted as an octal # number. # # For more information, please refer to the documentation at # https://docs.openstack.org/glance/latest/configuration/configuring.html # # Possible values: # * A valid file access permission # * Zero # * Any negative integer # # Related options: # * None # # (integer value) #filesystem_store_file_perm = 0 # # Chunk size, in bytes. # # The chunk size used when reading or writing image files. Raising this value # may improve the throughput but it may also slightly increase the memory usage # when handling a large number of requests. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #filesystem_store_chunk_size = 65536 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null # byte sequences on the filesystem; the holes that may appear will # automatically be interpreted by the filesystem as null bytes and will not # consume your storage. # Enabling this feature will also speed up image upload and save network traffic # in addition to saving space in the backend, as null byte sequences are not # sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #filesystem_thin_provisioning = false # # Path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the remote server certificate.
If # this option is set, the ``https_insecure`` option will be ignored and # the CA file specified will be used to authenticate the server # certificate and establish a secure connection to the server. # # Possible values: # * A valid path to a CA file # # Related options: # * https_insecure # # (string value) #https_ca_certificates_file = # # Set verification of the remote server certificate. # # This configuration option takes in a boolean value to determine # whether or not to verify the remote server certificate. If set to # True, the remote server certificate is not verified. If the option is # set to False, then the default CA truststore is used for verification. # # This option is ignored if ``https_ca_certificates_file`` is set. # The remote server certificate will then be verified using the file # specified using the ``https_ca_certificates_file`` option. # # Possible values: # * True # * False # # Related options: # * https_ca_certificates_file # # (boolean value) #https_insecure = true # # The http/https proxy information to be used to connect to the remote # server. # # This configuration option specifies the http/https proxy information # that should be used to connect to the remote server. The proxy # information should be a key value pair of the scheme and proxy, for # example, http:10.0.0.1:3128. You can also specify proxies for multiple # schemes by separating the key value pairs with a comma, for example, # http:10.0.0.1:3128, https:10.0.0.1:1080. # # Possible values: # * A comma separated list of scheme:proxy pairs as described above # # Related options: # * None # # (dict value) #http_proxy_information = # # Size, in megabytes, to chunk RADOS images into. # # Provide an integer value representing the size in megabytes to chunk # Glance images into. The default chunk size is 8 megabytes. For optimal # performance, the value should be a power of two. # # When Ceph's RBD object storage system is used as the storage backend # for storing Glance images, the images are chunked into objects of the # size set using this option. These chunked objects are then stored # across the distributed block data store for use by Glance. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #rbd_store_chunk_size = 8 # # RADOS pool in which images are stored. # # When RBD is used as the storage backend for storing Glance images, the # images are stored by means of logical grouping of the objects (chunks # of images) into a ``pool``. Each pool is defined with the number of # placement groups it can contain. The default pool that is used is # 'images'. # # More information on the RBD storage backend can be found here: # http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ # # Possible Values: # * A valid pool name # # Related options: # * None # # (string value) #rbd_store_pool = images # # RADOS user to authenticate as. # # This configuration option takes in the RADOS user to authenticate as. # This is only needed when RADOS authentication is enabled and is # applicable only if the user is using Cephx authentication. If the # value for this option is not set by the user or is set to None, a # default value will be chosen, which will be based on the client.<USER> # section in rbd_store_ceph_conf. # # Possible Values: # * A valid RADOS user # # Related options: # * rbd_store_ceph_conf # # (string value) #rbd_store_user = # # Ceph configuration file path.
# # This configuration option specifies the path to the Ceph configuration # file to be used. If the value for this option is not set by the user # or is set to the empty string, librados will read the standard ceph.conf # file by searching the default Ceph configuration file locations in # sequential order. See the Ceph documentation for details. # # NOTE: If using Cephx authentication, this file should include a reference # to the right keyring in a client.<USER> section # # NOTE 2: If you leave this option empty (the default), the actual Ceph # configuration file used may change depending on what version of librados # is being used. If it is important for you to know exactly which configuration # file is in effect, you may specify that file here using this option. # # Possible Values: # * A valid path to a configuration file # # Related options: # * rbd_store_user # # (string value) #rbd_store_ceph_conf = # # Timeout value for connecting to Ceph cluster. # # This configuration option takes in the timeout value in seconds used # when connecting to the Ceph cluster i.e. it sets the time to wait for # glance-api before closing the connection. This prevents glance-api # hangups during the connection to RBD. If the value for this option # is set to less than 0, no timeout is set and the default librados value # is used. # # Possible Values: # * Any integer value # # Related options: # * None # # (integer value) #rados_connect_timeout = -1 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null # byte sequences on the RBD backend; the holes that may appear will # automatically be interpreted by Ceph as null bytes and will not consume # your storage. # Enabling this feature will also speed up image upload and save network traffic # in addition to saving space in the backend, as null byte sequences are not # sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #rbd_thin_provisioning = false # # The host where the S3 server is listening. # # This configuration option sets the host of the S3 or S3 compatible storage # server. This option is required when using the S3 storage backend. # The host can contain a DNS name (e.g. s3.amazonaws.com, my-object-storage.com) # or an IP address (127.0.0.1). # # Possible values: # * A valid DNS name # * A valid IPv4 address # # Related Options: # * s3_store_access_key # * s3_store_secret_key # # (string value) #s3_store_host = # # The S3 region name. # # This parameter will set the region_name used by boto. # If this parameter is not set, we will try to compute it from the # s3_store_host. # # Possible values: # * A valid region name # # Related Options: # * s3_store_host # # (string value) #s3_store_region_name = # # The S3 query token access key. # # This configuration option takes the access key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend.
# # Possible values: # * Any string value that is the access key for a user with appropriate # privileges # # Related Options: # * s3_store_host # * s3_store_secret_key # # (string value) #s3_store_access_key = # # The S3 query token secret key. # # This configuration option takes the secret key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is a secret key corresponding to the access key # specified using the ``s3_store_access_key`` option # # Related Options: # * s3_store_host # * s3_store_access_key # # (string value) #s3_store_secret_key = # # The S3 bucket to be used to store the Glance data. # # This configuration option specifies where the glance images will be stored # in S3. If ``s3_store_create_bucket_on_put`` is set to true, the bucket will # be created automatically if it does not exist. # # Possible values: # * Any string value # # Related Options: # * s3_store_create_bucket_on_put # * s3_store_bucket_url_format # # (string value) #s3_store_bucket = # # Determine whether S3 should create a new bucket. # # This configuration option takes a boolean value to indicate whether Glance # should create a new bucket in S3 if it does not exist. # # Possible values: # * Any Boolean value # # Related Options: # * None # # (boolean value) #s3_store_create_bucket_on_put = false # # The S3 calling format used to determine the object. # # This configuration option takes the access model that is used to specify the # address of an object in an S3 bucket. # # NOTE: # In ``path``-style, the endpoint for the object looks like # 'https://s3.amazonaws.com/bucket/example.img'. # And in ``virtual``-style, the endpoint for the object looks like # 'https://bucket.s3.amazonaws.com/example.img'. # If you do not follow the DNS naming convention in the bucket name, you can # get objects in the path style, but not in the virtual style. # # Possible values: # * Any string value of ``auto``, ``virtual``, or ``path`` # # Related Options: # * s3_store_bucket # # (string value) #s3_store_bucket_url_format = auto # # The size, in MB, at which to start chunking image files and doing a # multipart upload in S3. # # This configuration option takes a threshold in MB to determine whether to # upload the image to S3 as is or to split it (Multipart Upload). # # Note: An image can be split into at most 10,000 parts. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_chunk_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_size = 100 # # The multipart upload part size, in MB, to use when uploading parts to S3. # # This configuration option takes the image split size in MB for Multipart # Upload. # # Note: An image can be split into at most 10,000 parts. # # Possible values: # * Any positive integer value (must be greater than or equal to 5 MB) # # Related Options: # * s3_store_large_object_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_chunk_size = 10 # # The number of thread pools to perform a multipart upload in S3. # # This configuration option takes the number of thread pools when performing a # Multipart Upload. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_size # * s3_store_large_object_chunk_size # # (integer value) #s3_store_thread_pools = 10 # # Set verification of the server certificate. # # This boolean determines whether or not to verify the server # certificate. If this option is set to True, swiftclient won't check # for a valid SSL certificate when authenticating. If the option is set # to False, then the default CA truststore is used for verification. # # Possible values: # * True # * False # # Related options: # * swift_store_cacert # # (boolean value) #swift_store_auth_insecure = false # # Path to the CA bundle file.
#
# This configuration option enables the operator to specify the path to
# a custom Certificate Authority file for SSL verification when
# connecting to Swift.
#
# Possible values:
# * A valid path to a CA file
#
# Related options:
# * swift_store_auth_insecure
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#swift_store_cacert = /etc/ssl/certs/ca-certificates.crt

#
# The region of Swift endpoint to use by Glance.
#
# Provide a string value representing a Swift region where Glance
# can connect to for image storage. By default, there is no region
# set.
#
# When Glance uses Swift as the storage backend to store images
# for a specific tenant that has multiple endpoints, setting a
# Swift region with ``swift_store_region`` allows Glance to connect
# to Swift in the specified region rather than relying on a single
# default endpoint.
#
# This option can be configured for both single-tenant and
# multi-tenant storage.
#
# NOTE: Setting the region with ``swift_store_region`` is
# tenant-specific and is necessary ``only if`` the tenant has
# multiple endpoints across different regions.
#
# Possible values:
# * A string value representing a valid Swift region.
#
# Related Options:
# * None
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#swift_store_region = RegionTwo

#
# The URL endpoint to use for Swift backend storage.
#
# Provide a string value representing the URL endpoint to use for
# storing Glance images in Swift store. By default, an endpoint
# is not set and the storage URL returned by ``auth`` is used.
# Setting an endpoint with ``swift_store_endpoint`` overrides the
# storage URL and is used for Glance image storage.
#
# NOTE: The URL should include the path up to, but excluding the
# container. The location of an object is obtained by appending
# the container and object to the configured URL.
#
# Possible values:
# * String value representing a valid URL path up to a Swift container
#
# Related Options:
# * None
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name

#
# Endpoint Type of Swift service.
#
# This string value indicates the endpoint type to use to fetch the
# Swift endpoint. The endpoint type determines the actions the user will
# be allowed to perform, for instance, reading and writing to the Store.
# This setting is only used if swift_store_auth_version is greater than
# 1.
#
# Possible values:
# * publicURL
# * adminURL
# * internalURL
#
# Related options:
# * swift_store_endpoint
#
# (string value)
# Possible values:
# publicURL -
# adminURL -
# internalURL -
#swift_store_endpoint_type = publicURL

#
# Type of Swift service to use.
#
# Provide a string value representing the service type to use for
# storing images while using Swift backend storage. The default
# service type is set to ``object-store``.
#
# NOTE: If ``swift_store_auth_version`` is set to 2, the value for
# this configuration option needs to be ``object-store``. If using
# a higher version of Keystone or a different auth scheme, this
# option may be modified.
#
# Possible values:
# * A string representing a valid service type for Swift storage.
#
# Related Options:
# * None
#
# (string value)
#swift_store_service_type = object-store

#
# Name of single container to store images/name prefix for multiple containers
#
# When a single container is being used to store images, this configuration
# option indicates the container within the Glance account to be used for
# storing all images. When multiple containers are used to store images, this
# will be the name prefix for all containers. Usage of single/multiple
# containers can be controlled using the configuration option
# ``swift_store_multiple_containers_seed``.
#
# When using multiple containers, the containers will be named after the value
# set for this configuration option with the first N chars of the image UUID
# as the suffix delimited by an underscore (where N is specified by
# ``swift_store_multiple_containers_seed``).
#
# Example: if the seed is set to 3 and swift_store_container = ``glance``,
# then an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be
# placed in the container ``glance_fda``. All dashes in the UUID are included
# when creating the container name but do not count toward the character
# limit, so when N=10 the container name would be ``glance_fdae39a1-ba``.
#
# Possible values:
# * If using single container, this configuration option can be any string
#   that is a valid swift container name in Glance's Swift account
# * If using multiple containers, this configuration option can be any
#   string as long as it satisfies the container naming rules enforced by
#   Swift. The value of ``swift_store_multiple_containers_seed`` should be
#   taken into account as well.
#
# Related options:
# * ``swift_store_multiple_containers_seed``
# * ``swift_store_multi_tenant``
# * ``swift_store_create_container_on_put``
#
# (string value)
#swift_store_container = glance

#
# The size threshold, in MB, after which Glance will start segmenting image
# data.
#
# Swift has an upper limit on the size of a single uploaded object. By
# default, this is 5GB. To upload objects bigger than this limit, objects are
# segmented into multiple smaller objects that are tied together with a
# manifest file. For more detail, refer to
# https://docs.openstack.org/swift/latest/overview_large_objects.html
#
# This configuration option specifies the size threshold over which the Swift
# driver will start segmenting image data into multiple smaller files.
# Currently, the Swift driver only supports creating Dynamic Large Objects.
#
# NOTE: This should be set by taking into account the large object limit
# enforced by the Swift cluster in use.
#
# Possible values:
# * A positive integer that is less than or equal to the large object limit
#   enforced by the Swift cluster in use.
#
# Related options:
# * ``swift_store_large_object_chunk_size``
#
# (integer value)
# Minimum value: 1
#swift_store_large_object_size = 5120

#
# The maximum size, in MB, of the segments when image data is segmented.
#
# When image data is segmented to upload images that are larger than the
# limit enforced by the Swift cluster, image data is broken into segments
# that are no bigger than the size specified by this configuration option.
# Refer to ``swift_store_large_object_size`` for more detail.
#
# For example: if ``swift_store_large_object_size`` is 5GB and
# ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will
# be segmented into 7 segments where the first six segments will be 1GB in
# size and the seventh segment will be 0.2GB.
#
# Possible values:
# * A positive integer that is less than or equal to the large object limit
#   enforced by the Swift cluster in use.
#
# Related options:
# * ``swift_store_large_object_size``
#
# (integer value)
# Minimum value: 1
#swift_store_large_object_chunk_size = 200
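#
# Example (illustrative only; the values below are placeholders, not
# recommendations): with these settings, a 6GB image would be uploaded as
# six 1GB segments tied together by a manifest.
#
#   swift_store_large_object_size = 5120
#   swift_store_large_object_chunk_size = 1024
#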
#
# Create container, if it doesn't already exist, when uploading an image.
#
# At the time of uploading an image, if the corresponding container doesn't
# exist, it will be created provided this configuration option is set to
# True. By default, it won't be created. This behavior is applicable for both
# single and multiple container modes.
#
# Possible values:
# * True
# * False
#
# Related options:
# * None
#
# (boolean value)
#swift_store_create_container_on_put = false

#
# Store images in tenant's Swift account.
#
# This enables multi-tenant storage mode which causes Glance images to be
# stored in tenant specific Swift accounts. If this is disabled, Glance
# stores all images in its own account. More details about the multi-tenant
# store can be found at
# https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage
#
# NOTE: If using multi-tenant swift store, please make sure
# that you do not set a swift configuration file with the
# 'swift_store_config_file' option.
#
# Possible values:
# * True
# * False
#
# Related options:
# * swift_store_config_file
#
# (boolean value)
#swift_store_multi_tenant = false

#
# Seed indicating the number of containers to use for storing images.
#
# When using a single-tenant store, images can be stored in one or more
# containers. When set to 0, all images will be stored in one single
# container. When set to an integer value between 1 and 32, multiple
# containers will be used to store images. This configuration option will
# determine how many containers are created. The total number of containers
# that will be used is equal to 16^N, so if this config option is set to 2,
# then 16^2=256 containers will be used to store images.
#
# Please refer to ``swift_store_container`` for more detail on the naming
# convention. More detail about using multiple containers can be found at
# https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store-
# multiple-containers.html
#
# NOTE: This is used only when swift_store_multi_tenant is disabled.
#
# Possible values:
# * A non-negative integer less than or equal to 32
#
# Related options:
# * ``swift_store_container``
# * ``swift_store_multi_tenant``
# * ``swift_store_create_container_on_put``
#
# (integer value)
# Minimum value: 0
# Maximum value: 32
#swift_store_multiple_containers_seed = 0

#
# List of tenants that will be granted admin access.
#
# This is a list of tenants that will be granted read/write access on
# all Swift containers created by Glance in multi-tenant mode. The
# default value is an empty list.
#
# Possible values:
# * A comma separated list of strings representing UUIDs of Keystone
#   projects/tenants
#
# Related options:
# * None
#
# (list value)
#swift_store_admin_tenants =

#
# SSL layer compression for HTTPS Swift requests.
#
# Provide a boolean value to determine whether or not to compress
# HTTPS Swift requests for images at the SSL layer. By default,
# compression is enabled.
#
# When using Swift as the backend store for Glance image storage,
# SSL layer compression of HTTPS Swift requests can be set using
# this option. If set to False, SSL layer compression of HTTPS
# Swift requests is disabled.
# Disabling this option may improve performance for images which are already
# in a compressed format, for example, qcow2.
#
# Possible values:
# * True
# * False
#
# Related Options:
# * None
#
# (boolean value)
#swift_store_ssl_compression = true

#
# The number of times a Swift download will be retried before the
# request fails.
#
# Provide an integer value representing the number of times an image
# download must be retried before erroring out. The default value is
# zero (no retry on a failed image download). When set to a positive
# integer value, ``swift_store_retry_get_count`` ensures that the
# download is attempted this many more times upon a download failure
# before sending an error message.
#
# Possible values:
# * Zero
# * Positive integer value
#
# Related Options:
# * None
#
# (integer value)
# Minimum value: 0
#swift_store_retry_get_count = 0

#
# Time in seconds defining the size of the window in which a new
# token may be requested before the current token is due to expire.
#
# Typically, the Swift storage driver fetches a new token upon the
# expiration of the current token to ensure continued access to
# Swift. However, some Swift transactions (like uploading image
# segments) may not recover well if the token expires on the fly.
#
# Hence, by fetching a new token before the current token expiration,
# we make sure that the token is neither expired nor close to expiry
# when a transaction is attempted. By default, the Swift storage
# driver requests a new token 60 seconds or less before the
# current token expiration.
#
# Possible values:
# * Zero
# * Positive integer value
#
# Related Options:
# * None
#
# (integer value)
# Minimum value: 0
#swift_store_expire_soon_interval = 60

#
# Use trusts for multi-tenant Swift store.
#
# This option instructs the Swift store to create a trust for each
# add/get request when the multi-tenant store is in use. Using trusts
# allows the Swift store to avoid problems that can be caused by an
# authentication token expiring during the upload or download of data.
#
# By default, ``swift_store_use_trusts`` is set to ``True`` (use of
# trusts is enabled). If set to ``False``, a user token is used for
# the Swift connection instead, eliminating the overhead of trust
# creation.
#
# NOTE: This option is considered only when
# ``swift_store_multi_tenant`` is set to ``True``
#
# Possible values:
# * True
# * False
#
# Related options:
# * swift_store_multi_tenant
#
# (boolean value)
#swift_store_use_trusts = true

#
# Buffer image segments before upload to Swift.
#
# Provide a boolean value to indicate whether or not Glance should
# buffer image data to disk while uploading to swift. This enables
# Glance to resume uploads on error.
#
# NOTES:
# When enabling this option, one should take great care as this
# increases disk usage on the API node. Be aware that depending
# upon how the file system is configured, the disk space used
# for buffering may decrease the actual disk space available for
# the glance image cache. Disk utilization will cap according to
# the following equation:
# (``swift_store_large_object_chunk_size`` * ``workers`` * 1000)
#
# Possible values:
# * True
# * False
#
# Related options:
# * swift_upload_buffer_dir
#
# (boolean value)
#swift_buffer_on_upload = false
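#
# Example (illustrative only; the directory below is a placeholder and must
# point at provisioned local storage; ``swift_upload_buffer_dir`` is
# described later in this file):
#
#   swift_buffer_on_upload = true
#   swift_upload_buffer_dir = /var/lib/glance/swift-buffer
#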
#
# Reference to default Swift account/backing store parameters.
#
# Provide a string value representing a reference to the default set
# of parameters required for using swift account/backing store for
# image storage. The default reference value for this configuration
# option is 'ref1'. This configuration option dereferences the
# parameters and facilitates image storage in the Swift storage backend
# every time a new image is added.
#
# Possible values:
# * A valid string value
#
# Related options:
# * None
#
# (string value)
#default_swift_reference = ref1

# DEPRECATED: Version of the authentication service to use. Valid versions
# are 2 and 3 for keystone and 1 (deprecated) for swauth and rackspace.
# (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason:
# The option 'auth_version' in the Swift back-end configuration file is
# used instead.
#swift_store_auth_version = 2

# DEPRECATED: The address where the Swift authentication service is
# listening. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason:
# The option 'auth_address' in the Swift back-end configuration file is
# used instead.
#swift_store_auth_address =

# DEPRECATED: The user to authenticate against the Swift authentication
# service. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason:
# The option 'user' in the Swift back-end configuration file is set instead.
#swift_store_user =

# DEPRECATED: Auth key for the user authenticating against the Swift
# authentication service. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason:
# The option 'key' in the Swift back-end configuration file is used
# to set the authentication key instead.
#swift_store_key =

#
# Absolute path to the file containing the swift account(s)
# configurations.
#
# Include a string value representing the path to a configuration
# file that has references for each of the configured Swift
# account(s)/backing stores. By default, no file path is specified
# and customized Swift referencing is disabled. Configuring this
# option is highly recommended when using the Swift storage backend
# for image storage, as it avoids storing credentials in the database.
#
# NOTE: Please do not configure this option if you have set
# ``swift_store_multi_tenant`` to ``True``.
#
# Possible values:
# * String value representing an absolute path on the glance-api
#   node
#
# Related options:
# * swift_store_multi_tenant
#
# (string value)
#swift_store_config_file =

#
# Directory to buffer image segments before upload to Swift.
#
# Provide a string value representing the absolute path to the
# directory on the glance node where image segments will be
# buffered briefly before they are uploaded to swift.
#
# NOTES:
# * This is required only when the configuration option
#   ``swift_buffer_on_upload`` is set to True.
# * This directory should be provisioned keeping in mind the
#   ``swift_store_large_object_chunk_size`` and the maximum
#   number of images that could be uploaded simultaneously by
#   a given glance node.
#
# Possible values:
# * String value representing an absolute directory path
#
# Related options:
# * swift_buffer_on_upload
# * swift_store_large_object_chunk_size
#
# (string value)
#swift_upload_buffer_dir =
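#
# Example (illustrative only) of a reference section as it might appear in
# the file named by ``swift_store_config_file``; the section name matches
# ``default_swift_reference`` and all values below are placeholders:
#
#   [ref1]
#   auth_version = 3
#   auth_address = http://keystone.example.org:5000/v3
#   user = service:glance
#   key = <password>
#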
#
# Address of the ESX/ESXi or vCenter Server target system.
#
# This configuration option sets the address of the ESX/ESXi or vCenter
# Server target system. This option is required when using the VMware
# storage backend. The address can contain an IP address (127.0.0.1) or
# a DNS name (www.my-domain.com).
#
# Possible Values:
# * A valid IPv4 or IPv6 address
# * A valid DNS name
#
# Related options:
# * vmware_server_username
# * vmware_server_password
#
# (host address value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#vmware_server_host = 127.0.0.1

#
# Server username.
#
# This configuration option takes the username for authenticating with
# the VMware ESX/ESXi or vCenter Server. This option is required when
# using the VMware storage backend.
#
# Possible Values:
# * Any string that is the username for a user with appropriate
#   privileges
#
# Related options:
# * vmware_server_host
# * vmware_server_password
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#vmware_server_username = root

#
# Server password.
#
# This configuration option takes the password for authenticating with
# the VMware ESX/ESXi or vCenter Server. This option is required when
# using the VMware storage backend.
#
# Possible Values:
# * Any string that is a password corresponding to the username
#   specified using the "vmware_server_username" option
#
# Related options:
# * vmware_server_host
# * vmware_server_username
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#vmware_server_password = vmware

#
# The number of VMware API retries.
#
# This configuration option specifies the number of times the VMware
# ESX/VC server API must be retried upon connection related issues or
# server API call overload. It is not possible to specify 'retry
# forever'.
#
# Possible Values:
# * Any positive integer value
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 1
#vmware_api_retry_count = 10

#
# Interval in seconds used for polling remote tasks invoked on VMware
# ESX/VC server.
#
# This configuration option takes in the sleep time in seconds for polling an
# on-going async task as part of the VMware ESX/VC server API call.
#
# Possible Values:
# * Any positive integer value
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 1
#vmware_task_poll_interval = 5

#
# The directory where the glance images will be stored in the datastore.
#
# This configuration option specifies the path to the directory where the
# glance images will be stored in the VMware datastore. If this option
# is not set, the default directory where the glance images are stored
# is openstack_glance.
#
# Possible Values:
# * Any string that is a valid path to a directory
#
# Related options:
# * None
#
# (string value)
#vmware_store_image_dir = /openstack_glance

#
# Set verification of the ESX/vCenter server certificate.
#
# This configuration option takes a boolean value to determine
# whether or not to verify the ESX/vCenter server certificate. If this
# option is set to True, the ESX/vCenter server certificate is not
# verified. If this option is set to False, then the default CA
# truststore is used for verification.
#
# This option is ignored if the "vmware_ca_file" option is set. In that
# case, the ESX/vCenter server certificate will then be verified using
# the file specified using the "vmware_ca_file" option.
#
# Possible Values:
# * True
# * False
#
# Related options:
# * vmware_ca_file
#
# (boolean value)
# Deprecated group/name - [glance_store]/vmware_api_insecure
#vmware_insecure = false
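#
# Example (illustrative only; the host and credentials below are
# placeholders, not shipped defaults):
#
#   vmware_server_host = vcenter.example.org
#   vmware_server_username = glance-svc
#   vmware_server_password = <password>
#   vmware_insecure = false
#   vmware_ca_file = /etc/ssl/certs/ca-certificates.crt
#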
#
# Absolute path to the CA bundle file.
#
# This configuration option enables the operator to use a custom
# Certificate Authority file to verify the ESX/vCenter certificate.
#
# If this option is set, the "vmware_insecure" option will be ignored
# and the CA file specified will be used to authenticate the ESX/vCenter
# server certificate and establish a secure connection to the server.
#
# Possible Values:
# * Any string that is a valid absolute path to a CA file
#
# Related options:
# * vmware_insecure
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#vmware_ca_file = /etc/ssl/certs/ca-certificates.crt

#
# The datastores where the image can be stored.
#
# This configuration option specifies the datastores where the image can
# be stored in the VMware store backend. This option may be specified
# multiple times for specifying multiple datastores. The datastore name
# should be specified after its datacenter path, separated by ":". An
# optional weight may be given after the datastore name, separated again
# by ":" to specify the priority. Thus, the required format becomes
# <datacenter_path>:<datastore_name>:<optional_weight>.
#
# When adding an image, the datastore with highest weight will be
# selected, unless there is not enough free space available in cases
# where the image size is already known. If no weight is given, it is
# assumed to be zero and the directory will be considered for selection
# last. If multiple datastores have the same weight, then the one with
# the most free space available is selected.
#
# Possible Values:
# * Any string of the format:
#   <datacenter_path>:<datastore_name>:<optional_weight>
#
# Related options:
# * None
#
# (multi valued)
#vmware_datastores =


[healthcheck]

#
# From oslo.middleware.healthcheck
#

# DEPRECATED: The path to respond to healthcheck requests on. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#path = /healthcheck

# Show more detailed information as part of the response. Security note:
# Enabling this option may expose sensitive details about the service being
# monitored. Be sure to verify that it will not violate your security
# policies. (boolean value)
#detailed = false

# Additional backends that can perform health checks and report that
# information back as part of a request. (list value)
#backends =

# A list of network addresses to limit source ip allowed to access
# healthcheck information. Any request from an IP outside of these network
# addresses is ignored. (list value)
#allowed_source_ranges =

# Ignore requests with proxy headers. (boolean value)
#ignore_proxied_requests = false

# Check the presence of a file to determine if an application is running on a
# port. Used by DisableByFileHealthcheck plugin. (string value)
#disable_by_file_path =

# Check the presence of a file based on a port to determine if an application
# is running on a port. Expects a "port:path" list of strings. Used by
# DisableByFilesPortsHealthcheck plugin. (list value)
#disable_by_file_paths =


[image_format]

#
# From glance.api
#

# Supported values for the 'container_format' image attribute (list value)
# Deprecated group/name - [DEFAULT]/container_formats
#container_formats = ami,ari,aki,bare,ovf,ova,docker,compressed

# Supported values for the 'disk_format' image attribute (list value)
# Deprecated group/name - [DEFAULT]/disk_formats
#disk_formats = ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop

# A list of strings describing the VMDK 'create-type' subformats that will be
# allowed.
# It is recommended to only include single-file-with-sparse-header variants
# to avoid potential host file exposure due to processing named extents. If
# this list is empty, then no VMDK image types are allowed. Note that this is
# currently only checked during image conversion (if enabled), and limits the
# types of VMDK images we will convert from. (list value)
#vmdk_allowed_types = streamOptimized,monolithicSparse


[key_manager]

#
# From castellan.config
#

# Specify the key manager implementation. Options are "barbican" and "vault".
# Default is "barbican". Will support the values earlier set using
# [key_manager]/api_class for some time. (string value)
# Deprecated group/name - [key_manager]/api_class
#backend = barbican

# The type of authentication credential to create. Possible values are
# 'token', 'password', 'keystone_token', and 'keystone_password'. Required if
# no context is passed to the credential factory. (string value)
#auth_type =

# Token for authentication. Required for 'token' and 'keystone_token'
# auth_type if no context is passed to the credential factory. (string value)
#token =

# Username for authentication. Required for 'password' auth_type. Optional
# for the 'keystone_password' auth_type. (string value)
#username =

# Password for authentication. Required for 'password' and
# 'keystone_password' auth_type. (string value)
#password =

# Use this endpoint to connect to Keystone. (string value)
#auth_url =

# User ID for authentication. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#user_id =

# User's domain ID for authentication. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#user_domain_id =

# User's domain name for authentication. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#user_domain_name =

# Trust ID for trust scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#trust_id =

# Domain ID for domain scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#domain_id =

# Domain name for domain scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#domain_name =

# Project ID for project scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_id =

# Project name for project scoping. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_name =

# Project's domain ID for project. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_domain_id =

# Project's domain name for project. Optional for 'keystone_token' and
# 'keystone_password' auth_type. (string value)
#project_domain_name =

# Allow fetching a new token if the current one is going to expire. Optional
# for 'keystone_token' and 'keystone_password' auth_type. (boolean value)
#reauthenticate = true
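#
# Example (illustrative only) using the 'keystone_password' credential type;
# all values below are placeholders for your own deployment:
#
#   backend = barbican
#   auth_type = keystone_password
#   username = glance
#   password = <service-password>
#   auth_url = http://keystone.example.org:5000/v3
#   project_name = service
#   user_domain_name = Default
#   project_domain_name = Default
#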

[keystone_authtoken]

#
# From keystonemiddleware.auth_token
#

# Complete "public" Identity API endpoint. This endpoint should not be an
# "admin" endpoint, as it should be accessible by all end users.
# Unauthenticated clients are redirected to this endpoint to authenticate.
# Although this endpoint should ideally be unversioned, client support in the
# wild varies. If you're using a versioned v2 endpoint here, then this should
# *not* be the same endpoint the service user utilizes for validating tokens,
# because normal end users may not be able to reach that endpoint. (string
# value)
# Deprecated group/name - [keystone_authtoken]/auth_uri
#www_authenticate_uri =

# DEPRECATED: Complete "public" Identity API endpoint. This endpoint should
# not be an "admin" endpoint, as it should be accessible by all end users.
# Unauthenticated clients are redirected to this endpoint to authenticate.
# Although this endpoint should ideally be unversioned, client support in the
# wild varies. If you're using a versioned v2 endpoint here, then this should
# *not* be the same endpoint the service user utilizes for validating tokens,
# because normal end users may not be able to reach that endpoint. This
# option is deprecated in favor of www_authenticate_uri and will be removed
# in the S release. (string value)
# This option is deprecated for removal since Queens.
# Its value may be silently ignored in the future.
# Reason: The auth_uri option is deprecated in favor of www_authenticate_uri
# and will be removed in the S release.
#auth_uri =

# API version of the Identity API endpoint. (string value)
#auth_version =

# Interface to use for the Identity API endpoint. Valid values are "public",
# "internal" (default) or "admin". (string value)
#interface = internal

# Do not handle authorization requests within the middleware, but delegate
# the authorization decision to downstream WSGI components. (boolean value)
#delay_auth_decision = false

# Request timeout value for communicating with Identity API server. (integer
# value)
#http_connect_timeout =

# How many times to retry reconnecting when communicating with the Identity
# API server. (integer value)
#http_request_max_retries = 3

# Request environment key where the Swift cache object is stored. When
# auth_token middleware is deployed with a Swift cache, use this option to
# have the middleware share a caching backend with swift. Otherwise, use the
# ``memcached_servers`` option instead. (string value)
#cache =

# Required if identity server requires client certificate (string value)
#certfile =

# Required if identity server requires client certificate (string value)
#keyfile =

# A PEM encoded Certificate Authority to use when verifying HTTPs
# connections. Defaults to system CAs. (string value)
#cafile =

# Verify HTTPS connections. (boolean value)
#insecure = false

# The region in which the identity server can be found. (string value)
#region_name =

# Optionally specify a list of memcached server(s) to use for caching. If
# left undefined, tokens will instead be cached in-process. (list value)
# Deprecated group/name - [keystone_authtoken]/memcache_servers
#memcached_servers =

# In order to prevent excessive effort spent validating tokens, the
# middleware caches previously-seen tokens for a configurable duration (in
# seconds). Set to -1 to disable caching completely. (integer value)
#token_cache_time = 300

# (Optional) If defined, indicate whether token data should be authenticated
# or authenticated and encrypted. If MAC, token data is authenticated (with
# HMAC) in the cache. If ENCRYPT, token data is encrypted and authenticated
# in the cache. If the value is not one of these options or empty, auth_token
# will raise an exception on initialization. (string value)
# Possible values:
# None -
# MAC -
# ENCRYPT -
#memcache_security_strategy = None

# (Optional, mandatory if memcache_security_strategy is defined) This string
# is used for key derivation. (string value)
#memcache_secret_key =
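#
# Example (illustrative only; the server address and secret below are
# placeholders) of enabling encrypted token caching with memcached:
#
#   memcached_servers = 127.0.0.1:11211
#   memcache_security_strategy = ENCRYPT
#   memcache_secret_key = <random-secret>
#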
# (Optional) Number of seconds memcached server is considered dead before it
# is tried again. (integer value)
#memcache_pool_dead_retry = 300

# (Optional) Maximum total number of open connections to every memcached
# server. (integer value)
#memcache_pool_maxsize = 10

# (Optional) Socket timeout in seconds for communicating with a memcached
# server. (integer value)
#memcache_pool_socket_timeout = 3

# (Optional) Number of seconds a connection to memcached is held unused in
# the pool before it is closed. (integer value)
#memcache_pool_unused_timeout = 60

# (Optional) Number of seconds that an operation will wait to get a memcached
# client connection from the pool. (integer value)
#memcache_pool_conn_get_timeout = 10

# (Optional) Use the advanced (eventlet safe) memcached client pool. (boolean
# value)
#memcache_use_advanced_pool = true

# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
# middleware will not ask for service catalog on token validation and will
# not set the X-Service-Catalog header. (boolean value)
#include_service_catalog = true

# Used to control the use and type of token binding. Can be set to:
# "disabled" to not check token binding. "permissive" (default) to validate
# binding information if the bind type is of a form known to the server and
# ignore it if not. "strict" like "permissive" but if the bind type is
# unknown the token will be rejected. "required" means any form of token
# binding is needed to be allowed. Finally the name of a binding method that
# must be present in tokens. (string value)
#enforce_token_bind = permissive

# A choice of roles that must be present in a service token. Service tokens
# are allowed to request that an expired token can be used and so this check
# should tightly control that only actual services are sending this token.
# Roles here are applied as an ANY check so any role in this list must be
# present. For backwards compatibility reasons this currently only affects
# the allow_expired check. (list value)
#service_token_roles = service

# For backwards compatibility reasons we must let valid service tokens pass
# that don't pass the service_token_roles check as valid. Setting this true
# will become the default in a future release and should be enabled if
# possible. (boolean value)
#service_token_roles_required = false

# The name or type of the service as it appears in the service catalog. This
# is used to validate tokens that have restricted access rules. (string
# value)
#service_type =

# Authentication type to load (string value)
# Deprecated group/name - [keystone_authtoken]/auth_plugin
#auth_type =

# Config Section from which to load plugin specific options (string value)
#auth_section =


[os_brick]

#
# From os_brick
#

# Directory to use for os-brick lock files. Defaults to
# oslo_concurrency.lock_path which is a sensible default for compute nodes,
# but not for HCI deployments or controllers where Glance uses Cinder as a
# backend, as locks should use the same directory. (string value)
#lock_path =


[oslo_concurrency]

#
# From oslo.concurrency
#

# Enables or disables inter-process locks. (boolean value)
#disable_process_locking = false

# Directory to use for lock files. For security, the specified directory
# should only be writable by the user running the processes that need
# locking. Defaults to environment variable OSLO_LOCK_PATH. If external locks
# are used, a lock path must be set. (string value)
#lock_path =
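#
# Example (illustrative only; the directory below is a placeholder and must
# be writable only by the user running the Glance processes):
#
#   [oslo_concurrency]
#   lock_path = /var/lib/glance/locks
#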

[oslo_limit]

#
# From oslo.limit
#

# The service's endpoint id which is registered in Keystone. (string value)
#endpoint_id =

# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# (string value)
#cafile =

# PEM encoded client certificate cert file (string value)
#certfile =

# PEM encoded client certificate key file (string value)
#keyfile =

# Verify HTTPS connections. (boolean value)
#insecure = false

# Timeout value for http requests (integer value)
#timeout =

# Collect per-API call timing information. (boolean value)
#collect_timing = false

# Log requests to multiple loggers. (boolean value)
#split_loggers = false

# Authentication URL (string value)
#auth_url =

# Scope for system operations (string value)
#system_scope =

# Domain ID to scope to (string value)
#domain_id =

# Domain name to scope to (string value)
#domain_name =

# Project ID to scope to (string value)
#project_id =

# Project name to scope to (string value)
#project_name =

# Domain ID containing project (string value)
#project_domain_id =

# Domain name containing project (string value)
#project_domain_name =

# ID of the trust to use as a trustee (string value)
#trust_id =

# Optional domain ID to use with v3 and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_id =

# Optional domain name to use with v3 API and v2 parameters. It will be used
# for both the user and project domain in v3 and ignored in v2
# authentication. (string value)
#default_domain_name =

# User ID (string value)
#user_id =

# Username (string value)
# Deprecated group/name - [oslo_limit]/user_name
#username =

# User's domain id (string value)
#user_domain_id =

# User's domain name (string value)
#user_domain_name =

# User's password (string value)
#password =

# Tenant ID (string value)
#tenant_id =

# Tenant Name (string value)
#tenant_name =

# The default service_type for endpoint URL discovery. (string value)
#service_type =

# The default service_name for endpoint URL discovery. (string value)
#service_name =

# List of interfaces, in order of preference, for endpoint URL. (list value)
#valid_interfaces =

# The default region_name for endpoint URL discovery. (string value)
#region_name =

# Always use this endpoint URL for requests for this client. NOTE: The
# unversioned endpoint should be specified here; to request a particular API
# version, use the `version`, `min-version`, and/or `max-version` options.
# (string value)
#endpoint_override =

# Minimum Major API version within a given Major API version for endpoint URL
# discovery. Mutually exclusive with min_version and max_version (string
# value)
#version =

# The minimum major version of a given API, intended to be used as the lower
# bound of a range with max_version. Mutually exclusive with version. If
# min_version is given with no max_version it is as if max version is
# "latest". (string value)
#min_version =

# The maximum major version of a given API, intended to be used as the upper
# bound of a range with min_version. Mutually exclusive with version. (string
# value)
#max_version =

# The maximum number of retries that should be attempted for connection
# errors. (integer value)
#connect_retries =

# Delay (in seconds) between two retries for connection errors. If not set,
# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds
# is used. (floating point value)
#connect_retry_delay =

# The maximum number of retries that should be attempted for retriable HTTP
# status codes.
# (integer value)
#status_code_retries =

# Delay (in seconds) between two retries for retriable status codes. If not
# set, exponential retry starting with 0.5 seconds up to a maximum of 60
# seconds is used. (floating point value)
#status_code_retry_delay =

# List of retriable HTTP status codes that should be retried. If not set
# default to [503] (list value)
#retriable_status_codes =


[oslo_messaging_amqp]

#
# From oslo.messaging
#

# Name for the AMQP container. Must be globally unique. Defaults to a
# generated UUID (string value)
#container_name =

# Timeout for inactive connections (in seconds) (integer value)
#idle_timeout = 0

# Debug: dump AMQP frames to stdout (boolean value)
#trace = false

# Attempt to connect via SSL. If no other ssl-related parameters are given,
# it will use the system's CA-bundle to verify the server's certificate.
# (boolean value)
#ssl = false

# CA certificate PEM file used to verify the server's certificate (string
# value)
#ssl_ca_file =

# Self-identifying certificate PEM file for client authentication (string
# value)
#ssl_cert_file =

# Private key PEM file used to sign ssl_cert_file certificate (optional)
# (string value)
#ssl_key_file =

# Password for decrypting ssl_key_file (if encrypted) (string value)
#ssl_key_password =

# By default SSL checks that the name in the server's certificate matches the
# hostname in the transport_url. In some configurations it may be preferable
# to use the virtual hostname instead, for example if the server uses the
# Server Name Indication TLS extension (rfc6066) to provide a certificate per
# virtual host. Set ssl_verify_vhost to True if the server's SSL certificate
# uses the virtual host name instead of the DNS name. (boolean value)
#ssl_verify_vhost = false

# Space separated list of acceptable SASL mechanisms (string value)
#sasl_mechanisms =

# Path to directory that contains the SASL configuration (string value)
#sasl_config_dir =

# Name of configuration file (without .conf suffix) (string value)
#sasl_config_name =

# SASL realm to use if no realm present in username (string value)
#sasl_default_realm =

# Seconds to pause before attempting to re-connect. (integer value)
# Minimum value: 1
#connection_retry_interval = 1

# Increase the connection_retry_interval by this many seconds after each
# unsuccessful failover attempt. (integer value)
# Minimum value: 0
#connection_retry_backoff = 2

# Maximum limit for connection_retry_interval + connection_retry_backoff
# (integer value)
# Minimum value: 1
#connection_retry_interval_max = 30

# Time to pause between re-connecting an AMQP 1.0 link that failed due to a
# recoverable error. (integer value)
# Minimum value: 1
#link_retry_delay = 10

# The maximum number of attempts to re-send a reply message which failed due
# to a recoverable error. (integer value)
# Minimum value: -1
#default_reply_retry = 0

# The deadline for an rpc reply message delivery. (integer value)
# Minimum value: 5
#default_reply_timeout = 30

# The deadline for an rpc cast or call message delivery. Only used when
# caller does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_send_timeout = 30

# The deadline for a sent notification message delivery. Only used when
# caller does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_notify_timeout = 30

# The duration to schedule a purge of idle sender links. Detach link after
# expiry. (integer value)
# Minimum value: 1
#default_sender_link_timeout = 600

# Indicates the addressing mode used by the driver.
# Permitted values:
# 'legacy'   - use legacy non-routable addressing
# 'routable' - use routable addresses
# 'dynamic'  - use legacy addresses if the message bus does not support
#              routing otherwise use routable addressing (string value)
#addressing_mode = dynamic

# Enable virtual host support for those message buses that do not natively
# support virtual hosting (such as qpidd). When set to true the virtual host
# name will be added to all message bus addresses, effectively creating a
# private 'subnet' per virtual host. Set to False if the message bus supports
# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative
# as the name of the virtual host. (boolean value)
#pseudo_vhost = true

# address prefix used when sending to a specific server (string value)
#server_request_prefix = exclusive

# address prefix used when broadcasting to all servers (string value)
#broadcast_prefix = broadcast

# address prefix when sending to any server in group (string value)
#group_request_prefix = unicast

# Address prefix for all generated RPC addresses (string value)
#rpc_address_prefix = openstack.org/om/rpc

# Address prefix for all generated Notification addresses (string value)
#notify_address_prefix = openstack.org/om/notify

# Appended to the address prefix when sending a fanout message. Used by the
# message bus to identify fanout messages. (string value)
#multicast_address = multicast

# Appended to the address prefix when sending to a particular
# RPC/Notification server. Used by the message bus to identify messages sent
# to a single destination. (string value)
#unicast_address = unicast

# Appended to the address prefix when sending to a group of consumers. Used
# by the message bus to identify messages that should be delivered in a
# round-robin fashion across consumers. (string value)
#anycast_address = anycast

# Exchange name used in notification addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_notification_exchange if set
# else control_exchange if set
# else 'notify' (string value)
#default_notification_exchange =

# Exchange name used in RPC addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_rpc_exchange if set
# else control_exchange if set
# else 'rpc' (string value)
#default_rpc_exchange =

# Window size for incoming RPC Reply messages. (integer value)
# Minimum value: 1
#reply_link_credit = 200

# Window size for incoming RPC Request messages (integer value)
# Minimum value: 1
#rpc_server_credit = 100

# Window size for incoming Notification messages (integer value)
# Minimum value: 1
#notify_server_credit = 100

# Send messages of this type pre-settled.
# Pre-settled messages will not receive acknowledgement
# from the peer. Note well: pre-settled messages may be
# silently discarded if the delivery fails.
# Permitted values:
# 'rpc-call'  - send RPC Calls pre-settled
# 'rpc-reply' - send RPC Replies pre-settled
# 'rpc-cast'  - Send RPC Casts pre-settled
# 'notify'    - Send Notifications pre-settled
# (multi valued)
#pre_settled = rpc-cast
#pre_settled = rpc-reply


[oslo_messaging_kafka]

#
# From oslo.messaging
#

# Max fetch bytes of Kafka consumer (integer value)
#kafka_max_fetch_bytes = 1048576

# Default timeout(s) for Kafka consumers (floating point value)
#kafka_consumer_timeout = 1.0

# DEPRECATED: Pool Size for Kafka Consumers (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#pool_size = 10

# DEPRECATED: The pool size limit for connections expiration policy (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_min_size = 2

# DEPRECATED: The time-to-live in sec of idle connections in the pool
# (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_ttl = 1200

# Group id for Kafka consumer. Consumers in one group will coordinate message
# consumption (string value)
#consumer_group = oslo_messaging_consumer

# Upper bound on the delay for KafkaProducer batching in seconds (floating
# point value)
#producer_batch_timeout = 0.0

# Size of batch for the producer async send (integer value)
#producer_batch_size = 16384

# The compression codec for all data generated by the producer. If not set,
# compression will not be used. Note that the allowed values of this depend
# on the Kafka version (string value)
# Possible values:
# none -
# gzip -
# snappy -
# lz4 -
# zstd -
#compression_codec = none

# Enable asynchronous consumer commits (boolean value)
#enable_auto_commit = false

# The maximum number of records returned in a poll call (integer value)
#max_poll_records = 500

# Protocol used to communicate with brokers (string value)
# Possible values:
# PLAINTEXT -
# SASL_PLAINTEXT -
# SSL -
# SASL_SSL -
#security_protocol = PLAINTEXT

# Mechanism when security protocol is SASL (string value)
#sasl_mechanism = PLAIN

# CA certificate PEM file used to verify the server certificate (string
# value)
#ssl_cafile =

# Client certificate PEM file used for authentication. (string value)
#ssl_client_cert_file =

# Client key PEM file used for authentication. (string value)
#ssl_client_key_file =

# Client key password file used for authentication. (string value)
#ssl_client_key_password =


[oslo_messaging_notifications]

#
# From oslo.messaging
#

# The driver(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing, log, test, noop (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =

# A URL representing the messaging driver to use for notifications. If not
# set, we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url =

# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications

# The maximum number of attempts to re-send a notification message which
# failed to be delivered due to a recoverable error. 0 - No retry, -1 -
# indefinite (integer value)
#retry = -1


[oslo_messaging_rabbit]

#
# From oslo.messaging
#

# Use durable queues in AMQP. If rabbit_quorum_queue is enabled, queues will
# be durable and this value will be ignored. (boolean value)
#amqp_durable_queues = false

# Auto-delete queues in AMQP. (boolean value)
#amqp_auto_delete = false

# Connect over SSL. (boolean value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl
#ssl = false

# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
#ssl_version =

# SSL key file (valid only if SSL enabled).
# (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
#ssl_key_file =

# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
#ssl_cert_file =

# SSL certification authority file (valid only if SSL enabled). (string
# value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
#ssl_ca_file =

# Global toggle for enforcing the OpenSSL FIPS mode. This feature requires
# Python support. This is available in Python 3.9 in all environments and may
# have been backported to older Python versions on select environments. If
# the Python executable used does not support OpenSSL FIPS mode, an exception
# will be raised. (boolean value)
#ssl_enforce_fips_mode = false

# Run the health check heartbeat thread through a native python thread by
# default. If this option is equal to False then the health check heartbeat
# will inherit the execution model from the parent process. For example if
# the parent process has monkey patched the stdlib by using eventlet/greenlet
# then the heartbeat will be run through a green thread. This option should
# be set to True only for the wsgi services. (boolean value)
#heartbeat_in_pthread = false

# How long to wait (in seconds) before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
# Minimum value: 0.0
# Maximum value: 4.5
#kombu_reconnect_delay = 1.0

# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will
# not be used. This option may not be available in future versions. (string
# value)
#kombu_compression =

# How long to wait for a missing client before abandoning the attempt to send
# it its replies. This value should not be longer than rpc_response_timeout.
# (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
#kombu_missing_consumer_retry_timeout = 60

# Determines how the next RabbitMQ node is chosen in case the one we are
# currently connected to becomes unavailable. Takes effect only if more than
# one RabbitMQ node is provided in config. (string value)
# Possible values:
# round-robin -
# shuffle -
#kombu_failover_strategy = round-robin

# The RabbitMQ login method. (string value)
# Possible values:
# PLAIN -
# AMQPLAIN -
# EXTERNAL -
# RABBIT-CR-DEMO -
#rabbit_login_method = AMQPLAIN

# How frequently to retry connecting with RabbitMQ. (integer value)
#rabbit_retry_interval = 1

# How long to backoff for between retries when connecting to RabbitMQ.
# (integer value)
#rabbit_retry_backoff = 2

# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
# (integer value)
#rabbit_interval_max = 30

# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue
# mirroring is no longer controlled by the x-ha-policy argument when
# declaring a queue. If you just want to make sure that all queues (except
# those with auto-generated names) are mirrored across all nodes, run:
# "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean
# value)
#rabbit_ha_queues = false

# Use quorum queues in RabbitMQ (x-queue-type: quorum). The quorum queue is a
# modern queue type for RabbitMQ implementing a durable, replicated FIFO
# queue based on the Raft consensus algorithm. It is available as of RabbitMQ
# 3.8.0. If set, this option will conflict with the HA queues
# (``rabbit_ha_queues``), aka mirrored queues; in other words, the HA queues
# should be disabled.
# Quorum queues are also durable by default so the amqp_durable_queues option
# is ignored when this option is enabled. (boolean value)
#rabbit_quorum_queue = false

# Use quorum queues for transient queues in RabbitMQ. Enabling this option
# will then make sure those queues are also using the quorum kind of RabbitMQ
# queues, which are HA by default. (boolean value)
#rabbit_transient_quorum_queue = false

# Each time a message is redelivered to a consumer, a counter is incremented.
# Once the redelivery count exceeds the delivery limit the message gets
# dropped or dead-lettered (if a DLX exchange has been configured). Used only
# when rabbit_quorum_queue is enabled. Default is 0, which means don't set a
# limit. (integer value)
#rabbit_quorum_delivery_limit = 0

# By default all messages are maintained in memory; if a quorum queue grows
# in length it can put memory pressure on a cluster. This option can limit
# the number of messages in the quorum queue. Used only when
# rabbit_quorum_queue is enabled. Default is 0, which means don't set a
# limit. (integer value)
# Deprecated group/name -
# [oslo_messaging_rabbit]/rabbit_quroum_max_memory_length
#rabbit_quorum_max_memory_length = 0

# By default all messages are maintained in memory; if a quorum queue grows
# in length it can put memory pressure on a cluster. This option can limit
# the number of memory bytes used by the quorum queue. Used only when
# rabbit_quorum_queue is enabled. Default is 0, which means don't set a
# limit. (integer value)
# Deprecated group/name -
# [oslo_messaging_rabbit]/rabbit_quroum_max_memory_bytes
#rabbit_quorum_max_memory_bytes = 0

# Positive integer representing duration in seconds for queue TTL
# (x-expires). Queues which are unused for the duration of the TTL are
# automatically deleted. The parameter affects only reply and fanout queues.
# Setting 0 as the value will disable the x-expires. If doing so, make sure
# you have a RabbitMQ policy to delete the queues, or your deployment will
# create an infinite number of queues over time. (integer value)
# Minimum value: 0
#rabbit_transient_queues_ttl = 1800

# Specifies the number of messages to prefetch. Setting to zero allows
# unlimited messages. (integer value)
#rabbit_qos_prefetch_count = 0

# Number of seconds after which the Rabbit broker is considered down if
# heartbeat's keep-alive fails (0 disables heartbeat). (integer value)
#heartbeat_timeout_threshold = 60

# How many times during the heartbeat_timeout_threshold we check the
# heartbeat. (integer value)
#heartbeat_rate = 3

# DEPRECATED: (DEPRECATED) Enable/Disable the RabbitMQ mandatory flag for
# direct send. The direct send is used as reply, so the MessageUndeliverable
# exception is raised in case the client queue does not exist. The
# MessageUndeliverable exception will be used to loop for a timeout to give
# the sender a chance to recover. This flag is deprecated and it will not be
# possible to deactivate this functionality anymore (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Mandatory flag no longer deactivable.
#direct_mandatory_flag = true

# Enable x-cancel-on-ha-failover flag so that rabbitmq server will cancel and
# notify consumers when a queue is down (boolean value)
#enable_cancel_on_failover = false

# Should we use consistent queue names or random ones (boolean value)
#use_queue_manager = false
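#
# Example (illustrative only; assumes RabbitMQ >= 3.8.0, and the delivery
# limit below is a placeholder, not a recommendation) of switching to quorum
# queues:
#
#   rabbit_quorum_queue = true
#   rabbit_quorum_delivery_limit = 3
#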
# Hostname used by queue manager. Defaults to the value returned by
# socket.gethostname(). (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#hostname = node1.example.com

# Process name used by queue manager (string value)
#processname = oslo-config-generator

# Use stream queues in RabbitMQ (x-queue-type: stream). Streams are a new
# persistent and replicated data structure ("queue type") in RabbitMQ which
# models an append-only log with non-destructive consumer semantics. It is
# available as of RabbitMQ 3.9.0. If set, this option will replace all fanout
# queues with only one stream queue. (boolean value)
#rabbit_stream_fanout = false


[oslo_middleware]

#
# From oslo.middleware.http_proxy_to_wsgi
#

# Whether the application is behind a proxy or not. This determines if the
# middleware should parse the headers or not. (boolean value)
#enable_proxy_headers_parsing = false


[oslo_policy]

#
# From oslo.policy
#

# This option controls whether or not to enforce scope when evaluating
# policies. If ``True``, the scope of the token used in the request is
# compared to the ``scope_types`` of the policy being enforced. If the scopes
# do not match, an ``InvalidScope`` exception will be raised. If ``False``, a
# message will be logged informing operators that policies are being invoked
# with mismatching scope. (boolean value)
#enforce_scope = true

# This option controls whether or not to use old deprecated defaults when
# evaluating policies. If ``True``, the old deprecated defaults are not going
# to be evaluated. This means if any existing token is allowed for old
# defaults but is disallowed for new defaults, it will be disallowed. It is
# encouraged to enable this flag along with the ``enforce_scope`` flag so
# that you can get the benefits of new defaults and ``scope_type`` together.
# If ``False``, the deprecated policy check string is logically OR'd with the
# new policy check string, allowing for a graceful upgrade experience between
# releases with new policies, which is the default behavior. (boolean value)
#enforce_new_defaults = true

# The relative or absolute path of a file that maps roles to permissions for
# a given service. Relative paths must be specified in relation to the
# configuration file setting this option. (string value)
#policy_file = policy.yaml

# Default rule. Enforced when a requested rule is not found. (string value)
#policy_default_rule = default

# Directories where policy configuration files are stored. They can be
# relative to any directory in the search path defined by the config_dir
# option, or absolute paths. The file defined by policy_file must exist for
# these directories to be searched. Missing or empty directories are ignored.
#policy_dirs = policy.d

# Content Type to send and receive data for REST based policy check (string
# value)
# Possible values:
# application/x-www-form-urlencoded -
# application/json -
#remote_content_type = application/x-www-form-urlencoded

# Server identity verification for REST based policy check (boolean value)
#remote_ssl_verify_server_crt = false

# Absolute path to ca cert file for REST based policy check (string value)
#remote_ssl_ca_crt_file =

# Absolute path to client cert for REST based policy check (string value)
#remote_ssl_client_crt_file =

# Absolute path to client key file for REST based policy check (string value)
#remote_ssl_client_key_file =

[oslo_reports]

#
# From oslo.reports
#

# Path to a log directory in which to create a file (string value)
#log_dir =

# The path to a file to watch for changes to trigger the reports, instead of
# signals. Setting this option disables the signal trigger for the reports. If
# the application is running as a WSGI application it is recommended to use
# this instead of signals. (string value)
#file_event_handler =

# How many seconds to wait between polls when file_event_handler is set
# (integer value)
#file_event_handler_interval = 1

[paste_deploy]

#
# From glance.api
#

#
# Deployment flavor to use in the server application pipeline.
#
# Provide a string value representing the appropriate deployment
# flavor used in the server application pipeline. This is typically
# the partial name of a pipeline in the paste configuration file with
# the service name removed.
#
# For example, if your paste section name in the paste configuration
# file is [pipeline:glance-api-keystone], set ``flavor`` to
# ``keystone``.
#
# Possible values:
# * String value representing a partial pipeline name.
#
# Related Options:
# * config_file
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#flavor = keystone

#
# Name of the paste configuration file.
#
# Provide a string value representing the name of the paste
# configuration file to use for configuring pipelines for
# server application deployments.
#
# NOTES:
# * Provide the name or the path relative to the glance directory
#   for the paste configuration file and not the absolute path.
# * The sample paste configuration file shipped with Glance need
#   not be edited in most cases as it comes with ready-made
#   pipelines for all common deployment flavors.
#
# If no value is specified for this option, the ``paste.ini`` file
# with the prefix of the corresponding Glance service's configuration
# file name will be searched for in the known configuration
# directories. (For example, if this option is missing from or has no
# value set in ``glance-api.conf``, the service will look for a file
# named ``glance-api-paste.ini``.) If the paste configuration file is
# not found, the service will not start.
#
# Possible values:
# * A string value representing the name of the paste configuration
#   file.
#
# Related Options:
# * flavor
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#config_file = glance-api-paste.ini
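# To make the flavor/config_file relationship concrete, a hedged
# illustration (section and file names are examples, not requirements):
# given a paste section [pipeline:glance-api-keystone] in
# glance-api-paste.ini, the pair of options above would be:
#
#   [paste_deploy]
#   flavor = keystone
#   config_file = glance-api-paste.ini

[profiler]

#
# From glance.api
#

#
# Enable the profiling for all services on this node.
#
# Default value is False (fully disable the profiling feature).
#
# Possible values:
#
# * True: Enables the feature
# * False: Disables the feature. The profiling cannot be started via this
#   project's operations.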
#   If the profiling is triggered by another project, this project
#   part will be empty.
# (boolean value)
# Deprecated group/name - [profiler]/profiler_enabled
#enabled = false

#
# Enable SQL requests profiling in services.
#
# Default value is False (SQL requests won't be traced).
#
# Possible values:
#
# * True: Enables SQL requests profiling. Each SQL query will be part of the
#   trace and can then be analyzed by how much time was spent for that.
# * False: Disables SQL requests profiling. The spent time is only shown on a
#   higher level of operations. Single SQL queries cannot be analyzed this way.
# (boolean value)
#trace_sqlalchemy = false

#
# Enable python requests package profiling.
#
# Supported drivers: jaeger+otlp
#
# Default value is False.
#
# Possible values:
#
# * True: Enables requests profiling.
# * False: Disables requests profiling.
# (boolean value)
#trace_requests = false

#
# Secret key(s) to use for encrypting context data for performance profiling.
#
# This string value should have the following format: <key1>[,<key2>,...],
# where each key is some random string. A user who triggers the profiling via
# the REST API has to set one of these keys in the headers of the REST API call
# to include profiling results of this node for this particular project.
#
# Both "enabled" flag and "hmac_keys" config options should be set to enable
# profiling. Also, to generate correct profiling information across all services
# at least one key needs to be consistent between OpenStack projects. This
# ensures it can be used from client side to generate the trace, containing
# information from all possible resources.
# (string value)
#hmac_keys = SECRET_KEY

#
# Connection string for a notifier backend.
#
# Default value is ``messaging://`` which sets the notifier to oslo_messaging.
#
# Examples of possible values:
#
# * ``messaging://`` - use oslo_messaging driver for sending spans.
# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans.
# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans.
# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending
#   spans.
# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending
#   spans.
# (string value)
#connection_string = messaging://

#
# Document type for notification indexing in elasticsearch.
# (string value)
#es_doc_type = notification

#
# This parameter is a time value parameter (for example: es_scroll_time=2m),
# indicating for how long the nodes that participate in the search will maintain
# relevant resources in order to continue and support it.
# (string value)
#es_scroll_time = 2m

#
# Elasticsearch splits large requests in batches. This parameter defines the
# maximum size of each batch (for example: es_scroll_size=10000).
# (integer value)
#es_scroll_size = 10000

#
# Redis sentinel provides a timeout option on the connections.
# This parameter defines that timeout (for example: socket_timeout=0.1).
# (floating point value)
#socket_timeout = 0.1

#
# Redis sentinel uses a service name to identify a master redis service.
# This parameter defines the name (for example:
# ``sentinel_service_name=mymaster``).
# (string value)
#sentinel_service_name = mymaster
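# A minimal illustrative sketch of enabling the profiler (the values are
# assumptions, not defaults; the HMAC key must be shared by every service
# you want to appear in the same trace):
#
#   [profiler]
#   enabled = true
#   trace_sqlalchemy = true
#   hmac_keys = SECRET_KEY
#   connection_string = redis://127.0.0.1:6379

#
# Enable filtering traces that contain an error/exception into a separate
# place.
#
# Default value is set to False.
#
# Possible values:
#
# * True: Enable filter traces that contain error/exception.
# * False: Disable the filter.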
# (boolean value)
#filter_error_trace = false

[task]

#
# From glance.api
#

# Time in hours for which a task lives after either succeeding or failing
# (integer value)
# Deprecated group/name - [DEFAULT]/task_time_to_live
#task_time_to_live = 48

#
# Task executor to be used to run task scripts.
#
# Provide a string value representing the executor to use for task
# executions. By default, ``TaskFlow`` executor is used.
#
# ``TaskFlow`` helps make task executions easy, consistent, scalable
# and reliable. It also enables creation of lightweight task objects
# and/or functions that are combined together into flows in a
# declarative manner.
#
# Possible values:
# * taskflow
#
# Related Options:
# * None
#
# (string value)
#task_executor = taskflow

#
# Absolute path to the work directory to use for asynchronous
# task operations.
#
# The directory set here will be used to operate over images -
# normally before they are imported into the destination store.
#
# NOTE: When providing a value for ``work_dir``, please make sure
# that enough space is provided for concurrent tasks to run
# efficiently without running out of space.
#
# A rough estimation can be done by multiplying the number of
# ``max_workers`` by an average image size (e.g. 500MB). The image
# size estimation should be done based on the average size in your
# deployment. Note that depending on the tasks running you may need
# to multiply this number by some factor depending on what the task
# does. For example, you may want to double the available size if
# image conversion is enabled. All this being said, remember these
# are just estimations and you should do them based on the worst
# case scenario and be prepared to act in case they were wrong.
#
# Possible values:
# * String value representing the absolute path to the working
#   directory
#
# Related Options:
# * None
#
# (string value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#work_dir = /work_dir

[taskflow_executor]

#
# From glance.api
#

#
# Set the taskflow engine mode.
#
# Provide a string type value to set the mode in which the taskflow
# engine would schedule tasks to the workers on the hosts. Based on
# this mode, the engine executes tasks either in single or multiple
# threads. The possible values for this configuration option are:
# ``serial`` and ``parallel``. When set to ``serial``, the engine runs
# all the tasks in a single thread which results in serial execution
# of tasks. Setting this to ``parallel`` makes the engine run tasks in
# multiple threads. This results in parallel execution of tasks.
#
# Possible values:
# * serial
# * parallel
#
# Related options:
# * max_workers
#
# (string value)
# Possible values:
# serial -
# parallel -
#engine_mode = parallel

#
# Set the number of engine executable tasks.
#
# Provide an integer value to limit the number of workers that can be
# instantiated on the hosts. In other words, this number defines the
# number of parallel tasks that can be executed at the same time by
# the taskflow engine. This value can be greater than one when the
# engine mode is set to parallel.
#
# Possible values:
# * Integer value greater than or equal to 1
#
# Related options:
# * engine_mode
#
# (integer value)
# Minimum value: 1
# Deprecated group/name - [task]/eventlet_executor_pool_size
#max_workers = 10
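# A worked sizing sketch for the ``work_dir`` estimate described above
# (all numbers are assumptions for illustration): with the defaults
# engine_mode = parallel and max_workers = 10, and an average image size
# of 500MB, the staging space estimate is 10 x 500MB = ~5GB, doubled to
# ~10GB if image conversion is enabled.

#
# Set the desired image conversion format.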
#
# Provide a valid image format to which you want images to be
# converted before they are stored for consumption by Glance.
# Appropriate image format conversions are desirable for specific
# storage backends in order to facilitate efficient handling of
# bandwidth and usage of the storage infrastructure.
#
# By default, ``conversion_format`` is not set and must be set
# explicitly in the configuration file.
#
# The allowed values for this option are ``raw``, ``qcow2`` and
# ``vmdk``. The ``raw`` format is the unstructured disk format and
# should be chosen when RBD or Ceph storage backends are used for
# image storage. ``qcow2`` is supported by the QEMU emulator, expands
# dynamically, and supports Copy on Write. ``vmdk`` is another common
# disk format supported by many common virtual machine monitors like
# VMware Workstation.
#
# Possible values:
# * qcow2
# * raw
# * vmdk
#
# Related options:
# * disk_formats
#
# (string value)
# Possible values:
# qcow2 -
# raw -
# vmdk -
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#conversion_format = raw

[vault]

#
# From castellan.config
#

# root token for vault (string value)
#root_token_id =

# AppRole role_id for authentication with vault (string value)
#approle_role_id =

# AppRole secret_id for authentication with vault (string value)
#approle_secret_id =

# Mountpoint of KV store in Vault to use, for example: secret (string value)
#kv_mountpoint = secret

# Path relative to root of KV store in Vault to use. (string value)
#kv_path =

# Version of KV store in Vault to use, for example: 2 (integer value)
#kv_version = 2

# Use this endpoint to connect to Vault, for example: "http://127.0.0.1:8200"
# (string value)
#vault_url = http://127.0.0.1:8200

# Absolute path to ca cert file (string value)
#ssl_ca_crt_file =

# SSL Enabled/Disabled (boolean value)
#use_ssl = false

# Vault Namespace to use for all requests to Vault. Vault Namespaces feature is
# available only in Vault Enterprise (string value)
#namespace =

# Timeout (in seconds) in each request to Vault (floating point value)
#timeout = 60

[wsgi]

#
# From glance.api
#

#
# The number of threads (per worker process) in the pool for processing
# asynchronous tasks. This controls how many asynchronous tasks (i.e. for
# image interoperable import) each worker can run at a time. If this is
# too large, you *may* have increased memory footprint per worker and/or you
# may overwhelm other system resources such as disk or outbound network
# bandwidth. If this is too small, image import requests will have to wait
# until a thread becomes available to begin processing. (integer value)
# Minimum value: 1
#task_pool_threads = 16

#
# Path to the python interpreter to use when spawning external
# processes. If left unspecified, this will be sys.executable, which should
# be the same interpreter running Glance itself. However, in some situations
# (for example, uwsgi) sys.executable may not actually point to a python
# interpreter and an alternative value must be set. (string value)
#python_interpreter =

glance-29.0.0/etc/glance-cache.conf

[DEFAULT]

#
# From glance.cache
#

#
# Secure hashing algorithm used for computing the 'os_hash_value' property.
#
# This option configures the Glance "multihash", which consists of two
# image properties: the 'os_hash_algo' and the 'os_hash_value'. The
# 'os_hash_algo' will be populated by the value of this configuration
# option, and the 'os_hash_value' will be populated by the hexdigest computed
# when the algorithm is applied to the uploaded or imported image data.
#
# The value must be a valid secure hash algorithm name recognized by the
# python 'hashlib' library. You can determine what these are by examining
# the 'hashlib.algorithms_available' data member of the version of the
# library being used in your Glance installation. For interoperability
# purposes, however, we recommend that you use the set of secure hash
# names supplied by the 'hashlib.algorithms_guaranteed' data member because
# those algorithms are guaranteed to be supported by the 'hashlib' library
# on all platforms. Thus, any image consumer using 'hashlib' locally should
# be able to verify the 'os_hash_value' of the image.
#
# The default value of 'sha512' is a performant secure hash algorithm.
#
# If this option is misconfigured, any attempts to store image data will fail.
# For that reason, we recommend using the default value.
#
# Possible values:
# * Any secure hash algorithm name recognized by the Python 'hashlib'
#   library
#
# Related options:
# * None
#
# (string value)
#hashing_algorithm = sha512

#
# Maximum number of image members per image.
#
# This limits the maximum number of users an image can be shared with. Any
# negative value is interpreted as unlimited.
#
# Related options:
# * None
#
# (integer value)
#image_member_quota = 128

#
# Maximum number of properties allowed on an image.
#
# This enforces an upper limit on the number of additional properties an image
# can have. Any negative value is interpreted as unlimited.
#
# (integer value)
#image_property_quota = 128

#
# Maximum number of tags allowed on an image.
#
# Any negative value is interpreted as unlimited.
#
# Related options:
# * None
#
# (integer value)
#image_tag_quota = 128

#
# Maximum number of locations allowed on an image.
#
# Any negative value is interpreted as unlimited.
#
# Related options:
# * None
#
# (integer value)
#image_location_quota = 10

#
# The default number of results to return for a request.
#
# Responses to certain API requests, like list images, may return
# multiple items. The number of results returned can be explicitly
# controlled by specifying the ``limit`` parameter in the API request.
# However, if a ``limit`` parameter is not specified, this
# configuration value will be used as the default number of results to
# be returned for any API request.
#
# NOTES:
# * The value of this configuration option may not be greater than
#   the value specified by ``api_limit_max``.
# * Setting this to a very large value may slow down database
#   queries and increase response times. Setting this to a
#   very low value may result in poor user experience.
#
# Possible values:
# * Any positive integer
#
# Related options:
# * api_limit_max
#
# (integer value)
# Minimum value: 1
#limit_param_default = 25
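# An illustrative pairing of the two pagination options (these mirror the
# documented defaults and are shown only to make the relationship
# explicit: ``limit_param_default`` must not exceed ``api_limit_max``,
# described below):
#
#   limit_param_default = 25
#   api_limit_max = 1000

#
# Maximum number of results that could be returned by a request.
#
# As described in the help text of ``limit_param_default``, some
# requests may return multiple results. The number of results to be
# returned is governed either by the ``limit`` parameter in the
# request or the ``limit_param_default`` configuration option.
# The value in either case cannot be greater than the absolute maximum
# defined by this configuration option. Anything greater than this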
# value is trimmed down to the maximum value defined here.
#
# NOTE: Setting this to a very large value may slow down database
# queries and increase response times. Setting this to a
# very low value may result in poor user experience.
#
# Possible values:
# * Any positive integer
#
# Related options:
# * limit_param_default
#
# (integer value)
# Minimum value: 1
#api_limit_max = 1000

#
# Show direct image location when returning an image.
#
# This configuration option indicates whether to show the direct image
# location when returning image details to the user. The direct image
# location is where the image data is stored in backend storage. This
# image location is shown under the image property ``direct_url``.
#
# When multiple image locations exist for an image, the best location
# is displayed based on the store weightage assigned for each store
# indicated by the configuration option ``weight``.
#
# NOTES:
# * Revealing image locations can present a GRAVE SECURITY RISK as
#   image locations can sometimes include credentials. Hence, this
#   is set to ``False`` by default. Set this to ``True`` with
#   EXTREME CAUTION and ONLY IF you know what you are doing!
# * If an operator wishes to avoid showing any image location(s)
#   to the user, then both this option and
#   ``show_multiple_locations`` MUST be set to ``False``.
#
# Possible values:
# * True
# * False
#
# Related options:
# * show_multiple_locations
# * weight
#
# (boolean value)
#show_image_direct_url = false

# DEPRECATED:
# Show all image locations when returning an image.
#
# This configuration option indicates whether to show all the image
# locations when returning image details to the user. When multiple
# image locations exist for an image, the locations are ordered based
# on the store weightage assigned for each store indicated by the
# configuration option ``weight``. The image locations are shown
# under the image property ``locations``.
#
# NOTES:
# * Revealing image locations can present a GRAVE SECURITY RISK as
#   image locations can sometimes include credentials. Hence, this
#   is set to ``False`` by default. Set this to ``True`` with
#   EXTREME CAUTION and ONLY IF you know what you are doing!
# * See https://wiki.openstack.org/wiki/OSSN/OSSN-0065 for more
#   information.
# * If an operator wishes to avoid showing any image location(s)
#   to the user, then both this option and
#   ``show_image_direct_url`` MUST be set to ``False``.
#
# Possible values:
# * True
# * False
#
# Related options:
# * show_image_direct_url
# * weight
#
# (boolean value)
# This option is deprecated for removal since Newton.
# Its value may be silently ignored in the future.
# Reason: Use of this option, deprecated since Newton, is a security risk and
# will be removed once we figure out a way to satisfy those use cases that
# currently require it. An earlier announcement that the same functionality can
# be achieved with greater granularity by using policies is incorrect. You
# cannot work around this option via policy configuration at the present time,
# though that is the direction we believe the fix will take. Please keep an eye
# on the Glance release notes to stay up to date on progress in addressing this
# issue.
#show_multiple_locations = false
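# A cautious illustration of the security notes above (these are the
# defaults, shown only to make the recommended posture explicit): to
# avoid exposing any image location to users, keep both options disabled:
#
#   show_image_direct_url = false
#   show_multiple_locations = false

#
# Calculate hash and checksum for the image.
#
# This configuration option indicates whether the
# /v2/images/{image_id}/locations POST API will calculate the hash and
# checksum of the image on the fly. If False, the hash and checksum
# calculation is silently skipped.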
#
# Possible values:
# * True
# * False
# (boolean value)
#do_secure_hash = true

#
# The number of times to retry when any operation fails.
# (integer value)
#http_retries = 3

#
# Maximum size of image a user can upload in bytes.
#
# An image upload greater than the size mentioned here would result
# in an image creation failure. This configuration option defaults to
# 1099511627776 bytes (1 TiB).
#
# NOTES:
# * This value should only be increased after careful
#   consideration and must be set less than or equal to
#   8 EiB (9223372036854775808).
# * This value must be set with careful consideration of the
#   backend storage capacity. Setting this to a very low value
#   may result in a large number of image failures. And, setting
#   this to a very large value may result in faster consumption
#   of storage. Hence, this must be set according to the nature of
#   images created and storage capacity available.
#
# Possible values:
# * Any positive number less than or equal to 9223372036854775808
#
# (integer value)
# Minimum value: 1
# Maximum value: 9223372036854775808
#image_size_cap = 1099511627776

#
# Maximum amount of image storage per tenant.
#
# This enforces an upper limit on the cumulative storage consumed by all images
# of a tenant across all stores. This is a per-tenant limit.
#
# The default unit for this configuration option is Bytes. However, storage
# units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``,
# ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and
# TeraBytes respectively. Note that there should not be any space between the
# value and unit. Value ``0`` signifies no quota enforcement. Negative values
# are invalid and result in errors.
#
# This has no effect if ``use_keystone_limits`` is enabled.
#
# Possible values:
# * A string that is a valid concatenation of a non-negative integer
#   representing the storage value and an optional string literal
#   representing storage units as mentioned above.
#
# Related options:
# * use_keystone_limits
#
# (string value)
#user_storage_quota = 0

#
# Utilize per-tenant resource limits registered in Keystone.
#
# Enabling this feature will cause Glance to retrieve limits set in keystone
# for resource consumption and enforce them against API users. Before turning
# this on, the limits need to be registered in Keystone or all quotas will be
# considered to be zero, and thus all new resource requests will be rejected.
#
# These per-tenant resource limits are independent from the static
# global ones configured in this config file. If this is enabled, the
# relevant static global limits will be ignored.
# (boolean value)
#use_keystone_limits = false

#
# Host address of the pydev server.
#
# Provide a string value representing the hostname or IP of the
# pydev server to use for debugging. The pydev server listens for
# debug connections on this address, facilitating remote debugging
# in Glance.
#
# Possible values:
# * Valid hostname
# * Valid IP address
#
# Related options:
# * None
#
# (host address value)
#
# This option has a sample default set, which means that
# its actual default value may vary from the one documented
# below.
#pydev_worker_debug_host = localhost

#
# Port number that the pydev server will listen on.
#
# Provide a port number to bind the pydev server to. The pydev
# process accepts debug connections on this port and facilitates
# remote debugging in Glance.
#
# Possible values:
# * A valid port number
#
# Related options:
# * None
#
# (port value)
# Minimum value: 0
# Maximum value: 65535
#pydev_worker_debug_port = 5678

# DEPRECATED:
# AES key for encrypting store location metadata.
#
# Provide a string value representing the AES cipher to use for
# encrypting Glance store metadata.
#
# NOTE: The AES key to use must be set to a random string of length
# 16, 24 or 32 bytes.
#
# Possible values:
# * String value representing a valid AES key
#
# Related options:
# * None
#
# (string value)
# This option is deprecated for removal since Dalmatian.
# Its value may be silently ignored in the future.
# Reason:
# This option doesn't serve the purpose of encrypting location metadata;
# it encrypts the location URL only for specific APIs. Also, enabling it
# during an upgrade may disrupt existing deployments, as it does not
# provide a db upgrade script to encrypt existing location URLs.
# Moreover, its functionality for encrypting location URLs is inconsistent,
# which results in download failures.
#metadata_encryption_key =

# DEPRECATED:
# Digest algorithm to use for digital signature.
#
# Provide a string value representing the digest algorithm to
# use for generating digital signatures. By default, ``sha256``
# is used.
#
# To get a list of the available algorithms supported by the version
# of OpenSSL on your platform, run the command:
# ``openssl list-message-digest-algorithms``.
# Examples are 'sha1', 'sha256', and 'sha512'.
#
# NOTE: ``digest_algorithm`` is not related to Glance's image signing
# and verification. It is only used to sign the universally unique
# identifier (UUID) as a part of the certificate file and key file
# validation.
#
# Possible values:
# * An OpenSSL message digest algorithm identifier
#
# Related options:
# * None
#
# (string value)
# This option is deprecated for removal since Dalmatian.
# Its value may be silently ignored in the future.
# Reason:
# This option has had no effect since the removal of native SSL support.
#digest_algorithm = sha256

#
# The URL that provides the location where the temporary data will be stored
#
# This option is for Glance internal use only. Glance will save the
# image data uploaded by the user to 'staging' endpoint during the
# image import process.
#
# This option does not change the 'staging' API endpoint by any means.
#
# NOTE: It is discouraged to use the same path as [task]/work_dir
#
# NOTE: 'file://' is the only option
# api_image_import flow will support for now.
#
# NOTE: The staging path must be on shared filesystem available to all
# Glance API nodes.
#
# Possible values:
# * String starting with 'file://' followed by absolute FS path
#
# Related options:
# * [task]/work_dir
#
# (string value)
#node_staging_uri = file:///tmp/staging/

#
# List of enabled Image Import Methods
#
# 'glance-direct', 'copy-image' and 'web-download' are enabled by default.
# 'glance-download' is available, but requires federated deployments.
#
# Related options:
# * [DEFAULT]/node_staging_uri (list value)
#enabled_import_methods = [glance-direct,web-download,copy-image]
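# An illustrative sketch (the path and method list are assumptions, not
# recommendations): a deployment that stages images on a shared mount and
# accepts only client-staged imports might set:
#
#   node_staging_uri = file:///var/lib/glance/staging/
#   enabled_import_methods = [glance-direct]

#
# The URL to this worker.
#
# If this is set, other glance workers will know how to contact this one
# directly if needed. For image import, a single worker stages the image
# and other workers need to be able to proxy the import request to the
# right one.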
#
# If unset, this will be considered to be ``public_endpoint``, which
# normally would be set to the same value on all workers, effectively
# disabling the proxying behavior.
#
# Possible values:
# * A URL by which this worker is reachable from other workers
#
# Related options:
# * public_endpoint
#
# (string value)
#worker_self_reference_url =

# DEPRECATED:
# The relative path to sqlite file database that will be used for image cache
# management.
#
# This is a relative path to the sqlite file database that tracks the age and
# usage statistics of image cache. The path is relative to image cache base
# directory, specified by the configuration option ``image_cache_dir``.
#
# This is a lightweight database with just one table.
#
# Possible values:
# * A valid relative path to sqlite file database
#
# Related options:
# * ``image_cache_dir``
#
# (string value)
# This option is deprecated for removal since Caracal (2024.1).
# Its value may be silently ignored in the future.
# Reason:
# As a centralized database will now be used for image cache management, the
# use of the `sqlite` database and driver will be dropped in the 'E' (2025.1)
# development cycle.
#image_cache_sqlite_db = cache.db

#
# The driver to use for image cache management.
#
# This configuration option provides the flexibility to choose between the
# different image-cache drivers available. An image-cache driver is responsible
# for providing the essential functions of image-cache like write images
# to/read images from cache, track age and usage of cached images, provide a
# list of cached images, fetch size of the cache, queue images for caching and
# clean up the cache, etc.
#
# The essential functions of a driver are defined in the base class
# ``glance.image_cache.drivers.base.Driver``. All image-cache drivers (existing
# and prospective) must implement this interface. Currently available drivers
# are ``centralized_db``, ``sqlite`` and ``xattr``. These drivers primarily
# differ in the way they store the information about cached images:
#
# * The ``centralized_db`` driver uses a central database (which will be common
#   for all glance nodes) to track the usage of cached images.
# * The ``sqlite`` (deprecated) driver uses a sqlite database (which sits on
#   every glance node locally) to track the usage of cached images.
# * The ``xattr`` driver uses the extended attributes of files to store this
#   information. It also requires a filesystem that sets ``atime`` on the files
#   when accessed.
#
# Deprecation warning:
# * As a centralized database will now be used for image cache management, the
#   use of the `sqlite` database and driver will be dropped in the 'E' (2025.1)
#   development cycle.
#
# Possible values:
# * centralized_db
# * sqlite
# * xattr
#
# Related options:
# * None
#
# (string value)
# Possible values:
# centralized_db -
# sqlite -
# xattr -
#image_cache_driver = centralized_db

#
# The upper limit on cache size, in bytes, after which the cache-pruner cleans
# up the image cache.
#
# NOTE: This is just a threshold for cache-pruner to act upon. It is NOT a
# hard limit beyond which the image cache would never grow. In fact, depending
# on how often the cache-pruner runs and how quickly the cache fills, the image
# cache can far exceed the size specified here very easily. Hence, care must be
# taken to appropriately schedule the cache-pruner and in setting this limit.
#
# Glance caches an image when it is downloaded. Consequently, the size of the
# image cache grows over time as the number of downloads increases.
# To keep the cache size from becoming unmanageable, it is recommended to run
# the cache-pruner as a periodic task. When the cache-pruner is kicked off, it
# compares the current size of image cache and triggers a cleanup if the image
# cache grew beyond the size specified here. After the cleanup, the size of
# cache is less than or equal to size specified here.
#
# Possible values:
# * Any non-negative integer
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 0
#image_cache_max_size = 10737418240

#
# The amount of time, in seconds, an incomplete image remains in the cache.
#
# Incomplete images are images for which download is in progress. Please see
# the description of configuration option ``image_cache_dir`` for more detail.
# Sometimes, due to various reasons, it is possible the download may hang and
# the incompletely downloaded image remains in the ``incomplete`` directory.
# This configuration option sets a time limit on how long the incomplete images
# should remain in the ``incomplete`` directory before they are cleaned up.
# Once an incomplete image spends more time than is specified here, it'll be
# removed by cache-cleaner on its next run.
#
# It is recommended to run cache-cleaner as a periodic task on the Glance API
# nodes to keep the incomplete images from occupying disk space.
#
# Possible values:
# * Any non-negative integer
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 0
#image_cache_stall_time = 86400

#
# Base directory for image cache.
#
# This is the location where image data is cached and served out of. All cached
# images are stored directly under this directory. This directory also contains
# three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``.
#
# The ``incomplete`` subdirectory is the staging area for downloading images.
# An image is first downloaded to this directory. When the image download is
# successful it is moved to the base directory. However, if the download fails,
# the partially downloaded image file is moved to the ``invalid`` subdirectory.
#
# The ``queue`` subdirectory is used for queuing images for download. This is
# used primarily by the cache-prefetcher, which can be scheduled as a periodic
# task like cache-pruner and cache-cleaner, to cache images ahead of their
# usage.
# Upon receiving the request to cache an image, Glance touches a file in the
# ``queue`` directory with the image id as the file name. The cache-prefetcher,
# when running, polls for the files in ``queue`` directory and starts
# downloading them in the order they were created. When the download is
# successful, the zero-sized file is deleted from the ``queue`` directory.
# If the download fails, the zero-sized file remains and it'll be retried the
# next time cache-prefetcher runs.
#
# Possible values:
# * A valid path
#
# Related options:
# * ``image_cache_sqlite_db``
#
# (string value)
#image_cache_dir =

#
# From oslo.log
#

# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false

# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, log-date-format). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append =

# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S

# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file =

# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir =

# DEPRECATED: Uses logging handler designed to watch file system. When log file
# is moved or removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and Linux
# platform is used. This option is ignored if log_config_append is set. (boolean
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: This function is known to have been broken for a long time, and
# depends on an unmaintained library.
#watch_log_file = false

# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append is
# set. (boolean value)
#use_syslog = false

# Enable journald for logging. If running in a systemd environment you may wish
# to enable journal support. Doing so will use the journal native protocol which
# includes structured metadata in addition to log messages. This option is
# ignored if log_config_append is set. (boolean value)
#use_journal = false

# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER

# Use JSON formatting for logging. This option is ignored if log_config_append
# is set. (boolean value)
#use_json = false

# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = false

# DEPRECATED: Log output to Windows Event Log. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Windows support is no longer maintained.
#use_eventlog = false

# (Optional) Set the 'color' key according to log levels. This option takes
# effect only when logging to stderr or stdout is used. This option is ignored
# if log_config_append is set. (boolean value)
#log_color = false

# The amount of time before the log files are rotated. This option is ignored
# unless log_rotation_type is set to "interval". (integer value)
#log_rotate_interval = 1

# Rotation interval type. The time of the last file change (or the time when
# the service was started) is used when scheduling the next rotation. (string
# value)
# Possible values:
# Seconds -
# Minutes -
# Hours -
# Days -
# Weekday -
# Midnight -
#log_rotate_interval_type = days

# Maximum number of rotated log files. (integer value)
#max_logfile_count = 30

# Log file maximum size in MB. This option is ignored if "log_rotation_type" is
# not set to "size". (integer value)
#max_logfile_size_mb = 200
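# An illustrative rotation sketch (values are assumptions, not
# recommendations): size-based rotation keeping up to 30 files of 200MB
# each, combining the two options above with log_rotation_type described
# below:
#
#   log_rotation_type = size
#   max_logfile_size_mb = 200
#   max_logfile_count = 30

# Log rotation type. (string value)
# Possible values:
# interval - Rotate logs at predefined time intervals.
# size - Rotate logs once they reach a predefined size.
# none - Do not rotate log files.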
#log_rotation_type = none

# Format string to use for log messages with context. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s

# Format string to use for log messages when context is undefined. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# Additional data to append to log message when logging level for the message
# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d

# Prefix each line of exception output with this format. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s

# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter
# (string value)
#logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s

# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO

# Enables or disables publication of error events. (boolean value)
#publish_errors = false

# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "

# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "

# Interval, number of seconds, of log rate limiting. (integer value)
#rate_limit_interval = 0

# Maximum number of logged messages per rate_limit_interval. (integer value)
#rate_limit_burst = 0

# Log level name used by rate limiting. Logs with level greater or equal to
# rate_limit_except_level are not filtered. An empty string means that all
# levels are filtered. (string value)
# Possible values:
# CRITICAL -
# ERROR -
# INFO -
# WARNING -
# DEBUG -
# '' -
#rate_limit_except_level = CRITICAL

# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false

[glance_store]

#
# From glance.store
#

# DEPRECATED:
# List of enabled Glance stores.
#
# Register the storage backends to use for storing disk images
# as a comma separated list. The default stores enabled for
# storing disk images with Glance are ``file`` and ``http``.
#
# Possible values:
# * A comma separated list that could include:
#   * file
#   * http
#   * swift
#   * rbd
#   * cinder
#   * vmware
#   * s3
#
# Related Options:
# * default_store
#
# (list value)
# This option is deprecated for removal since Rocky.
# Its value may be silently ignored in the future.
# Reason:
# This option is deprecated in favor of the new config option
# ``enabled_backends`` which helps to configure multiple backend stores
# of different schemes.
#
# This option is scheduled for removal in the U development
# cycle.
#stores = file,http

# DEPRECATED:
# The default scheme to use for storing images.
#
# Provide a string value representing the default scheme to use for
# storing images. If not set, Glance uses ``file`` as the default
# scheme to store images with the ``file`` store.
#
# NOTE: The value given for this configuration option must be a valid
# scheme for a store registered with the ``stores`` configuration
# option.
#
# Possible values:
# * file
# * filesystem
# * http
# * https
# * swift
# * swift+http
# * swift+https
# * swift+config
# * rbd
# * cinder
# * vsphere
# * s3
#
# Related Options:
# * stores
#
# (string value)
# Possible values:
# file -
# filesystem -
# http -
# https -
# swift -
# swift+http -
# swift+https -
# swift+config -
# rbd -
# cinder -
# vsphere -
# s3 -
# This option is deprecated for removal since Rocky.
# Its value may be silently ignored in the future.
# Reason:
# This option is deprecated in favor of the new config option
# ``default_backend`` which acts similar to the ``default_store`` config
# option.
#
# This option is scheduled for removal in the U development
# cycle.
#default_store = file

#
# Information to match when looking for cinder in the service catalog.
#
# When the ``cinder_endpoint_template`` is not set and any of
# ``cinder_store_auth_address``, ``cinder_store_user_name``,
# ``cinder_store_project_name``, ``cinder_store_password`` is not set,
# cinder store uses this information to lookup cinder endpoint from the service
# catalog in the current context. ``cinder_os_region_name``, if set, is taken
# into consideration to fetch the appropriate endpoint.
#
# The service catalog can be listed by the ``openstack catalog list`` command.
#
# Possible values:
# * A string of the following form:
#   ``<service_type>:<service_name>:<interface>``
#   At least ``service_type`` and ``interface`` should be specified.
#   ``service_name`` can be omitted.
#
# Related options:
# * cinder_os_region_name
# * cinder_endpoint_template
# * cinder_store_auth_address
# * cinder_store_user_name
# * cinder_store_project_name
# * cinder_store_password
# * cinder_store_project_domain_name
# * cinder_store_user_domain_name
#
# (string value)
#cinder_catalog_info = volumev3::publicURL

#
# Override service catalog lookup with template for cinder endpoint.
#
# When this option is set, this value is used to generate the cinder endpoint,
# instead of looking it up from the service catalog.
# This value is ignored if ``cinder_store_auth_address``,
# ``cinder_store_user_name``, ``cinder_store_project_name``, and
# ``cinder_store_password`` are specified.
#
# If this configuration option is set, ``cinder_catalog_info`` will be ignored.
#
# Possible values:
# * URL template string for cinder endpoint, where ``%%(tenant)s`` is
#   replaced with the current tenant (project) name.
#   For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s``
#
# Related options:
# * cinder_store_auth_address
# * cinder_store_user_name
# * cinder_store_project_name
# * cinder_store_password
# * cinder_store_project_domain_name
# * cinder_store_user_domain_name
# * cinder_catalog_info
#
# (string value)
#cinder_endpoint_template =

#
# Region name to lookup cinder service from the service catalog.
#
# This is used only when ``cinder_catalog_info`` is used for determining the
# endpoint. If set, the lookup for cinder endpoint by this node is filtered to
# the specified region. It is useful when multiple regions are listed in the
# catalog.
# If this is not set, the endpoint is looked up from every region.
#
# Possible values:
# * A string that is a valid region name.
#
# Related options:
# * cinder_catalog_info
#
# (string value)
# Deprecated group/name - [glance_store]/os_region_name
#cinder_os_region_name =

#
# Location of a CA certificates file used for cinder client requests.
#
# The specified CA certificates file, if set, is used to verify cinder
# connections via HTTPS endpoint. If the endpoint is HTTP, this value is
# ignored.
# ``cinder_api_insecure`` must be set to ``True`` to enable the verification.
#
# Possible values:
# * Path to a ca certificates file
#
# Related options:
# * cinder_api_insecure
#
# (string value)
#cinder_ca_certificates_file =

#
# Number of cinderclient retries on failed http calls.
#
# When a call fails due to any error, cinderclient will retry the call up to
# the specified number of times after sleeping a few seconds.
#
# Possible values:
# * A positive integer
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 0
#cinder_http_retries = 3

#
# Time period, in seconds, to wait for a cinder volume transition to
# complete.
#
# When the cinder volume is created, deleted, or attached to the glance node to
# read/write the volume data, the volume's state is changed. For example, the
# newly created volume status changes from ``creating`` to ``available`` after
# the creation process is completed. This specifies the maximum time to wait
# for the status change. If a timeout occurs while waiting, or the status is
# changed to an unexpected value (e.g. ``error``), the image creation fails.
#
# Possible values:
# * A positive integer
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 0
#cinder_state_transition_timeout = 300

#
# Allow to perform insecure SSL requests to cinder.
#
# If this option is set to True, HTTPS endpoint connection is verified using
# the CA certificates file specified by the ``cinder_ca_certificates_file``
# option.
#
# Possible values:
# * True
# * False
#
# Related options:
# * cinder_ca_certificates_file
#
# (boolean value)
#cinder_api_insecure = false

#
# The address where the cinder authentication service is listening.
#
# When all of ``cinder_store_auth_address``, ``cinder_store_user_name``,
# ``cinder_store_project_name``, and ``cinder_store_password`` options are
# specified, the specified values are always used for the authentication.
# This is useful to hide the image volumes from users by storing them in a
# project/tenant specific to the image service. It also enables users to share
# the image volume among other projects under the control of glance's ACL.
#
# If either of these options are not set, the cinder endpoint is looked up
# from the service catalog, and current context's user and project are used.
#
# Possible values:
# * A valid authentication service address, for example:
#   ``http://openstack.example.org/identity/v2.0``
#
# Related options:
# * cinder_store_user_name
# * cinder_store_password
# * cinder_store_project_name
# * cinder_store_project_domain_name
# * cinder_store_user_domain_name
#
# (string value)
#cinder_store_auth_address =
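# An illustrative sketch of the dedicated-project pattern described above
# (all names and the password are assumptions, not recommendations):
#
#   cinder_store_auth_address = http://openstack.example.org/identity
#   cinder_store_user_name = glance
#   cinder_store_password = example-password
#   cinder_store_project_name = service

#
# User name to authenticate against cinder.
#
# This must be used with all the following non-domain-related options.
# If any of these are not specified (except domain-related options),
# the user of the current context is used.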
#
# Possible values:
# * A valid user name
#
# Related options:
# * cinder_store_auth_address
# * cinder_store_password
# * cinder_store_project_name
# * cinder_store_project_domain_name
# * cinder_store_user_domain_name
#
# (string value)
#cinder_store_user_name =

#
# Domain of the user to authenticate against cinder.
#
# Possible values:
# * A valid domain name for the user specified by ``cinder_store_user_name``
#
# Related options:
# * cinder_store_auth_address
# * cinder_store_password
# * cinder_store_project_name
# * cinder_store_project_domain_name
# * cinder_store_user_name
#
# (string value)
#cinder_store_user_domain_name = Default

#
# Password for the user authenticating against cinder.
#
# This must be used with all the following related options.
# If any of these are not specified (except domain-related options),
# the user of the current context is used.
#
# Possible values:
# * A valid password for the user specified by ``cinder_store_user_name``
#
# Related options:
# * cinder_store_auth_address
# * cinder_store_user_name
# * cinder_store_project_name
# * cinder_store_project_domain_name
# * cinder_store_user_domain_name
#
# (string value)
#cinder_store_password =

#
# Project name where the image volume is stored in cinder.
#
# If this configuration option is not set, the project in the current context
# is used.
#
# This must be used with all the following related options.
# If any of these are not specified (except domain-related options),
# the user of the current context is used.
#
# Possible values:
# * A valid project name
#
# Related options:
# * ``cinder_store_auth_address``
# * ``cinder_store_user_name``
# * ``cinder_store_password``
# * ``cinder_store_project_domain_name``
# * ``cinder_store_user_domain_name``
#
# (string value)
#cinder_store_project_name =

#
# Domain of the project where the image volume is stored in cinder.
#
# Possible values:
# * A valid domain name of the project specified by
#   ``cinder_store_project_name``
#
# Related options:
# * ``cinder_store_auth_address``
# * ``cinder_store_user_name``
# * ``cinder_store_password``
# * ``cinder_store_project_domain_name``
# * ``cinder_store_user_domain_name``
#
# (string value)
#cinder_store_project_domain_name = Default

#
# Path to the rootwrap configuration file to use for running commands as root.
#
# The cinder store requires root privileges to operate the image volumes (for
# connecting to iSCSI/FC volumes and reading/writing the volume data, etc.).
# The configuration file should allow the required commands by cinder store and
# os-brick library.
#
# Possible values:
# * Path to the rootwrap config file
#
# Related options:
# * None
#
# (string value)
#rootwrap_config = /etc/glance/rootwrap.conf

#
# Volume type that will be used for volume creation in cinder.
#
# Some cinder backends can have several volume types to optimize storage usage.
# Adding this option allows an operator to choose a specific volume type
# in cinder that can be optimized for images.
#
# If this is not set, then the default volume type specified in the cinder
# configuration will be used for volume creation.
#
# Possible values:
# * A valid volume type from cinder
#
# Related options:
# * None
#
# NOTE: You cannot use an encrypted volume_type associated with an NFS backend.
# An encrypted volume stored on an NFS backend will raise an exception whenever
# glance_store tries to write or access image data stored in that volume.
# Consult your Cinder administrator to determine an appropriate volume_type.
#
# (string value)
#cinder_volume_type =

#
# If this is set to True, attachment of volumes for image transfer will
# be aborted when multipathd is not running. Otherwise, it will fall back
# to single path.
#
# Possible values:
# * True or False
#
# Related options:
# * cinder_use_multipath
#
# (boolean value)
#cinder_enforce_multipath = false

#
# Flag to identify whether multipath is supported in the deployment.
#
# Set it to False if multipath is not supported.
#
# Possible values:
# * True or False
#
# Related options:
# * cinder_enforce_multipath
#
# (boolean value)
#cinder_use_multipath = false

#
# Directory where the NFS volume is mounted on the glance node.
#
# Possible values:
#
# * A string representing absolute path of mount point.
# (string value)
#cinder_mount_point_base = /var/lib/glance/mnt

#
# If this is set to True, glance will perform an extend operation
# on the attached volume. Only enable this option if the cinder
# backend driver supports the functionality of extending online
# (in-use) volumes. Supported from cinder microversion 3.42 and
# onwards. By default, it is set to False.
#
# Possible values:
# * True or False
#
# (boolean value)
#cinder_do_extend_attached = false

#
# Directory to which the filesystem backend store writes images.
#
# Upon start up, Glance creates the directory if it doesn't already
# exist and verifies write access to the user under which
# ``glance-api`` runs. If the write access isn't available, a
# ``BadStoreConfiguration`` exception is raised and the filesystem
# store may not be available for adding new images.
#
# NOTE: This directory is used only when filesystem store is used as a
# storage backend. Either ``filesystem_store_datadir`` or
# ``filesystem_store_datadirs`` option must be specified in
# ``glance-api.conf``. If both options are specified, a
# ``BadStoreConfiguration`` will be raised and the filesystem store
# may not be available for adding new images.
#
# Possible values:
# * A valid path to a directory
#
# Related options:
# * ``filesystem_store_datadirs``
# * ``filesystem_store_file_perm``
#
# (string value)
#filesystem_store_datadir = /var/lib/glance/images
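# An illustrative sketch of the multi-directory alternative described
# next (paths and priorities are assumptions, not recommendations):
# using the priority syntax of ``filesystem_store_datadirs``, a fast
# disk can be preferred over a larger, slower one:
#
#   filesystem_store_datadirs = /mnt/ssd/images:200
#   filesystem_store_datadirs = /mnt/hdd/images:100

#
# List of directories and their priorities to which the filesystem
# backend store writes images.
#
# The filesystem store can be configured to store images in multiple
# directories as opposed to using a single directory specified by the
# ``filesystem_store_datadir`` configuration option. When using
# multiple directories, each directory can be given an optional
# priority to specify the preference order in which they should
# be used. Priority is an integer that is concatenated to the
# directory path with a colon where a higher value indicates higher
# priority. When two directories have the same priority, the directory
# with the most free space is used. When no priority is specified, it
# defaults to zero.
#
# More information on configuring filesystem store with multiple store
# directories can be found at
# https://docs.openstack.org/glance/latest/configuration/configuring.html
#
# NOTE: This directory is used only when filesystem store is used as a
# storage backend. Either ``filesystem_store_datadir`` or
# ``filesystem_store_datadirs`` option must be specified in
# ``glance-api.conf``. If both options are specified, a
# ``BadStoreConfiguration`` will be raised and the filesystem store
# may not be available for adding new images.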
#
# Possible values:
# * List of strings of the following form:
#   * ``<a valid directory path>:<optional integer priority>``
#
# Related options:
# * ``filesystem_store_datadir``
# * ``filesystem_store_file_perm``
#
# (multi valued)
#filesystem_store_datadirs =

#
# Filesystem store metadata file.
#
# The path to a file which contains the metadata to be returned with any
# location associated with the filesystem store. Once this option is set, it
# is used for new images created afterward only - previously existing images
# are not affected.
#
# The file must contain a valid JSON object. The object should contain the keys
# ``id`` and ``mountpoint``. The value for both keys should be a string.
#
# Possible values:
# * A valid path to the store metadata file
#
# Related options:
# * None
#
# (string value)
#filesystem_store_metadata_file =

#
# File access permissions for the image files.
#
# Set the intended file access permissions for image data. This provides
# a way to enable other services, e.g. Nova, to consume images directly
# from the filesystem store. The users running the services that are
# intended to be given access could be made members of the group that
# owns the files created. Assigning a value less than or equal to
# zero for this configuration option signifies that no changes be made
# to the default permissions. This value will be decoded as an octal
# digit.
#
# For more information, please refer the documentation at
# https://docs.openstack.org/glance/latest/configuration/configuring.html
#
# Possible values:
# * A valid file access permission
# * Zero
# * Any negative integer
#
# Related options:
# * None
#
# (integer value)
#filesystem_store_file_perm = 0

#
# Chunk size, in bytes.
#
# The chunk size used when reading or writing image files. Raising this value
# may improve the throughput but it may also slightly increase the memory usage
# when handling a large number of requests.
#
# Possible Values:
# * Any positive integer value
#
# Related options:
# * None
#
# (integer value)
# Minimum value: 1
#filesystem_store_chunk_size = 65536

#
# Enable or disable thin provisioning in this backend.
#
# This configuration option enables the feature of not actually writing null
# byte sequences to the filesystem; the holes that can appear will
# automatically be interpreted by the filesystem as null bytes and will not
# really consume your storage.
# Enabling this feature will also speed up image upload and save network
# traffic in addition to saving space in the backend, as null byte sequences
# are not sent over the network.
#
# Possible Values:
# * True
# * False
#
# Related options:
# * None
#
# (boolean value)
#filesystem_thin_provisioning = false

#
# Path to the CA bundle file.
#
# This configuration option enables the operator to use a custom
# Certificate Authority file to verify the remote server certificate. If
# this option is set, the ``https_insecure`` option will be ignored and
# the CA file specified will be used to authenticate the server
# certificate and establish a secure connection to the server.
#
# Possible values:
# * A valid path to a CA file
#
# Related options:
# * https_insecure
#
# (string value)
#https_ca_certificates_file =
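# A hedged illustration of strict verification for http store sources
# (the path is an assumption): setting the CA bundle causes
# ``https_insecure`` to be ignored, so this pair effectively enables
# verification against the custom CA:
#
#   https_ca_certificates_file = /etc/glance/ca-bundle.pem
#   https_insecure = false

#
# Set verification of the remote server certificate.
#
# This configuration option takes in a boolean value to determine
# whether or not to verify the remote server certificate. If set to
# True, the remote server certificate is not verified. If the option is
# set to False, then the default CA truststore is used for verification.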
# # This option is ignored if ``https_ca_certificates_file`` is set. # The remote server certificate will then be verified using the file # specified using the ``https_ca_certificates_file`` option. # # Possible values: # * True # * False # # Related options: # * https_ca_certificates_file # # (boolean value) #https_insecure = true # # The http/https proxy information to be used to connect to the remote # server. # # This configuration option specifies the http/https proxy information # that should be used to connect to the remote server. The proxy # information should be a key value pair of the scheme and proxy, for # example, http:10.0.0.1:3128. You can also specify proxies for multiple # schemes by separating the key value pairs with a comma, for example, # http:10.0.0.1:3128, https:10.0.0.1:1080. # # Possible values: # * A comma separated list of scheme:proxy pairs as described above # # Related options: # * None # # (dict value) #http_proxy_information = # # Size, in megabytes, to chunk RADOS images into. # # Provide an integer value representing the size in megabytes to chunk # Glance images into. The default chunk size is 8 megabytes. For optimal # performance, the value should be a power of two. # # When Ceph's RBD object storage system is used as the storage backend # for storing Glance images, the images are chunked into objects of the # size set using this option. These chunked objects are then stored # across the distributed block data store for use by Glance. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #rbd_store_chunk_size = 8 # # RADOS pool in which images are stored. # # When RBD is used as the storage backend for storing Glance images, the # images are stored by means of logical grouping of the objects (chunks # of images) into a ``pool``. Each pool is defined with the number of # placement groups it can contain. The default pool that is used is # 'images'. # # More information on the RBD storage backend can be found here: # http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ # # Possible Values: # * A valid pool name # # Related options: # * None # # (string value) #rbd_store_pool = images # # RADOS user to authenticate as. # # This configuration option takes in the RADOS user to authenticate as. # This is only needed when RADOS authentication is enabled and is # applicable only if the user is using Cephx authentication. If the # value for this option is not set by the user or is set to None, a # default value will be chosen, which will be based on the ``client.`` # section in rbd_store_ceph_conf. # # Possible Values: # * A valid RADOS user # # Related options: # * rbd_store_ceph_conf # # (string value) #rbd_store_user = # # Ceph configuration file path. # # This configuration option specifies the path to the Ceph configuration # file to be used. If the value for this option is not set by the user # or is set to the empty string, librados will read the standard ceph.conf # file by searching the default Ceph configuration file locations in # sequential order. See the Ceph documentation for details. # # NOTE: If using Cephx authentication, this file should include a reference # to the right keyring in a ``client.`` section # # NOTE 2: If you leave this option empty (the default), the actual Ceph # configuration file used may change depending on what version of librados # is being used.
If it is important for you to know exactly which configuration # file is in effect, you may specify that file here using this option. # # Possible Values: # * A valid path to a configuration file # # Related options: # * rbd_store_user # # (string value) #rbd_store_ceph_conf = # # Timeout value for connecting to the Ceph cluster. # # This configuration option takes in the timeout value in seconds used # when connecting to the Ceph cluster, i.e. it sets the time to wait for # glance-api before closing the connection. This prevents glance-api # hangups during the connection to RBD. If the value for this option # is set to less than 0, no timeout is set and the default librados value # is used. # # Possible Values: # * Any integer value # # Related options: # * None # # (integer value) #rados_connect_timeout = -1 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null # byte sequences to the RBD backend; the resulting holes are automatically # interpreted by Ceph as null bytes and do not actually consume your storage. # Enabling this feature will also speed up image upload and save network # traffic, in addition to saving space in the backend, as null byte sequences # are not sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #rbd_thin_provisioning = false # # The host where the S3 server is listening. # # This configuration option sets the host of the S3 or S3 compatible storage # server. This option is required when using the S3 storage backend. # The host can contain a DNS name (e.g. s3.amazonaws.com, my-object-storage.com) # or an IP address (127.0.0.1). # # Possible values: # * A valid DNS name # * A valid IPv4 address # # Related Options: # * s3_store_access_key # * s3_store_secret_key # # (string value) #s3_store_host = # # The S3 region name. # # This parameter will set the region_name used by boto. # If this parameter is not set, we will try to compute it from the # s3_store_host. # # Possible values: # * A valid region name # # Related Options: # * s3_store_host # # (string value) #s3_store_region_name = # # The S3 query token access key. # # This configuration option takes the access key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is the access key for a user with appropriate # privileges # # Related Options: # * s3_store_host # * s3_store_secret_key # # (string value) #s3_store_access_key = # # The S3 query token secret key. # # This configuration option takes the secret key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is a secret key corresponding to the access key # specified using the ``s3_store_host`` option # # Related Options: # * s3_store_host # * s3_store_access_key # # (string value) #s3_store_secret_key = # # The S3 bucket to be used to store the Glance data. # # This configuration option specifies where the glance images will be stored # in S3. If ``s3_store_create_bucket_on_put`` is set to true, the bucket will # be created automatically if it does not exist.
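# # Example of a minimal S3 backend setup (all values hypothetical): # # s3_store_host = s3.example.com # s3_store_access_key = AKIAEXAMPLEACCESSKEY # s3_store_secret_key = example-secret-key # s3_store_bucket = glance-images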
# # Possible values: # * Any string value # # Related Options: # * s3_store_create_bucket_on_put # * s3_store_bucket_url_format # # (string value) #s3_store_bucket = # # Determine whether S3 should create a new bucket. # # This configuration option takes a boolean value to indicate whether Glance # should create a new bucket in S3 if it does not exist. # # Possible values: # * Any Boolean value # # Related Options: # * None # # (boolean value) #s3_store_create_bucket_on_put = false # # The S3 calling format used to determine the object. # # This configuration option takes the access model that is used to specify the # address of an object in an S3 bucket. # # NOTE: # In ``path``-style, the endpoint for the object looks like # 'https://s3.amazonaws.com/bucket/example.img'. # And in ``virtual``-style, the endpoint for the object looks like # 'https://bucket.s3.amazonaws.com/example.img'. # If you do not follow the DNS naming convention in the bucket name, you can # get objects in the path style, but not in the virtual style. # # Possible values: # * Any string value of ``auto``, ``virtual``, or ``path`` # # Related Options: # * s3_store_bucket # # (string value) #s3_store_bucket_url_format = auto # # The size, in MB, at which S3 should start chunking image files and do a # multipart upload in S3. # # This configuration option takes a threshold in MB to determine whether to # upload the image to S3 as is or to split it (Multipart Upload). # # Note: A single image can be split into at most 10,000 parts. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_chunk_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_size = 100 # # The multipart upload part size, in MB, that S3 should use when uploading # parts. # # This configuration option takes the image split size in MB for Multipart # Upload. # # Note: A single image can be split into at most 10,000 parts. # # Possible values: # * Any positive integer value (must be greater than or equal to 5 MB) # # Related Options: # * s3_store_large_object_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_chunk_size = 10 # # The number of thread pools to perform a multipart upload in S3. # # This configuration option takes the number of thread pools to use when # performing a Multipart Upload. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_size # * s3_store_large_object_chunk_size # # (integer value) #s3_store_thread_pools = 10 # # Set verification of the server certificate. # # This boolean determines whether or not to verify the server # certificate. If this option is set to True, swiftclient won't check # for a valid SSL certificate when authenticating. If the option is set # to False, then the default CA truststore is used for verification. # # Possible values: # * True # * False # # Related options: # * swift_store_cacert # # (boolean value) #swift_store_auth_insecure = false # # Path to the CA bundle file. # # This configuration option enables the operator to specify the path to # a custom Certificate Authority file for SSL verification when # connecting to Swift. # # Possible values: # * A valid path to a CA file # # Related options: # * swift_store_auth_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_cacert = /etc/ssl/certs/ca-certificates.crt # # The region of the Swift endpoint to use by Glance.
# # Provide a string value representing a Swift region where Glance # can connect to for image storage. By default, there is no region # set. # # When Glance uses Swift as the storage backend to store images # for a specific tenant that has multiple endpoints, setting of a # Swift region with ``swift_store_region`` allows Glance to connect # to Swift in the specified region as opposed to a single region # connectivity. # # This option can be configured for both single-tenant and # multi-tenant storage. # # NOTE: Setting the region with ``swift_store_region`` is # tenant-specific and is necessary ``only if`` the tenant has # multiple endpoints across different regions. # # Possible values: # * A string value representing a valid Swift region. # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_region = RegionTwo # # The URL endpoint to use for Swift backend storage. # # Provide a string value representing the URL endpoint to use for # storing Glance images in Swift store. By default, an endpoint # is not set and the storage URL returned by ``auth`` is used. # Setting an endpoint with ``swift_store_endpoint`` overrides the # storage URL and is used for Glance image storage. # # NOTE: The URL should include the path up to, but excluding the # container. The location of an object is obtained by appending # the container and object to the configured URL. # # Possible values: # * String value representing a valid URL path up to a Swift container # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name # # Endpoint Type of Swift service. # # This string value indicates the endpoint type to use to fetch the # Swift endpoint. The endpoint type determines the actions the user will # be allowed to perform, for instance, reading and writing to the Store. # This setting is only used if swift_store_auth_version is greater than # 1. # # Possible values: # * publicURL # * adminURL # * internalURL # # Related options: # * swift_store_endpoint # # (string value) # Possible values: # publicURL - # adminURL - # internalURL - #swift_store_endpoint_type = publicURL # # Type of Swift service to use. # # Provide a string value representing the service type to use for # storing images while using Swift backend storage. The default # service type is set to ``object-store``. # # NOTE: If ``swift_store_auth_version`` is set to 2, the value for # this configuration option needs to be ``object-store``. If using # a higher version of Keystone or a different auth scheme, this # option may be modified. # # Possible values: # * A string representing a valid service type for Swift storage. # # Related Options: # * None # # (string value) #swift_store_service_type = object-store # # Name of single container to store images/name prefix for multiple containers # # When a single container is being used to store images, this configuration # option indicates the container within the Glance account to be used for # storing all images. When multiple containers are used to store images, this # will be the name prefix for all containers. Usage of single/multiple # containers can be controlled using the configuration option # ``swift_store_multiple_containers_seed``. 
# # When using multiple containers, the containers will be named after the value # set for this configuration option with the first N chars of the image UUID # as the suffix delimited by an underscore (where N is specified by # ``swift_store_multiple_containers_seed``). # # Example: if the seed is set to 3 and swift_store_container = ``glance``, then # an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in # the container ``glance_fda``. All dashes in the UUID are included when # creating the container name but do not count toward the character limit, so # when N=10 the container name would be ``glance_fdae39a1-ba``. # # Possible values: # * If using a single container, this configuration option can be any string # that is a valid swift container name in Glance's Swift account # * If using multiple containers, this configuration option can be any # string as long as it satisfies the container naming rules enforced by # Swift. The value of ``swift_store_multiple_containers_seed`` should be # taken into account as well. # # Related options: # * ``swift_store_multiple_containers_seed`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (string value) #swift_store_container = glance # # The size threshold, in MB, after which Glance will start segmenting image # data. # # Swift has an upper limit on the size of a single uploaded object. By default, # this is 5GB. To upload objects bigger than this limit, objects are segmented # into multiple smaller objects that are tied together with a manifest file. # For more detail, refer to # https://docs.openstack.org/swift/latest/overview_large_objects.html # # This configuration option specifies the size threshold over which the Swift # driver will start segmenting image data into multiple smaller files. # Currently, the Swift driver only supports creating Dynamic Large Objects. # # NOTE: This should be set by taking into account the large object limit # enforced by the Swift cluster under consideration. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster under consideration. # # Related options: # * ``swift_store_large_object_chunk_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_size = 5120 # # The maximum size, in MB, of the segments when image data is segmented. # # When image data is segmented to upload images that are larger than the limit # enforced by the Swift cluster, image data is broken into segments that are no # bigger than the size specified by this configuration option. # Refer to ``swift_store_large_object_size`` for more detail. # # For example: if ``swift_store_large_object_size`` is 5GB and # ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be # segmented into 7 segments where the first six segments will be 1GB in size and # the seventh segment will be 0.2GB. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster under consideration. # # Related options: # * ``swift_store_large_object_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_chunk_size = 200 # # Create a container, if it doesn't already exist, when uploading an image. # # At the time of uploading an image, if the corresponding container doesn't # exist, it will be created provided this configuration option is set to True. # By default, it won't be created.
This behavior is applicable for both single # and multiple container modes. # # Possible values: # * True # * False # # Related options: # * None # # (boolean value) #swift_store_create_container_on_put = false # # Store images in the tenant's Swift account. # # This enables multi-tenant storage mode, which causes Glance images to be # stored in tenant-specific Swift accounts. If this is disabled, Glance stores # all images in its own account. More details on the multi-tenant store can be # found at # https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage # # NOTE: If using the multi-tenant swift store, please make sure # that you do not set a swift configuration file with the # 'swift_store_config_file' option. # # Possible values: # * True # * False # # Related options: # * swift_store_config_file # # (boolean value) #swift_store_multi_tenant = false # # Seed indicating the number of containers to use for storing images. # # When using a single-tenant store, images can be stored in one or more # containers. When set to 0, all images will be stored in one single container. # When set to an integer value between 1 and 32, multiple containers will be # used to store images. This configuration option will determine how many # containers are created. The total number of containers that will be used is # equal to 16^N, so if this config option is set to 2, then 16^2=256 containers # will be used to store images. # # Please refer to ``swift_store_container`` for more detail on the naming # convention. More detail about using multiple containers can be found at # https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- # multiple-containers.html # # NOTE: This is used only when swift_store_multi_tenant is disabled. # # Possible values: # * A non-negative integer less than or equal to 32 # # Related options: # * ``swift_store_container`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (integer value) # Minimum value: 0 # Maximum value: 32 #swift_store_multiple_containers_seed = 0 # # List of tenants that will be granted admin access. # # This is a list of tenants that will be granted read/write access on # all Swift containers created by Glance in multi-tenant mode. The # default value is an empty list. # # Possible values: # * A comma separated list of strings representing UUIDs of Keystone # projects/tenants # # Related options: # * None # # (list value) #swift_store_admin_tenants = # # SSL layer compression for HTTPS Swift requests. # # Provide a boolean value to determine whether or not to compress # HTTPS Swift requests for images at the SSL layer. By default, # compression is enabled. # # When using Swift as the backend store for Glance image storage, # SSL layer compression of HTTPS Swift requests can be set using # this option. If set to False, SSL layer compression of HTTPS # Swift requests is disabled. Disabling this option may improve # performance for images which are already in a compressed format, # for example, qcow2. # # Possible values: # * True # * False # # Related Options: # * None # # (boolean value) #swift_store_ssl_compression = true # # The number of times a Swift download will be retried before the # request fails. # # Provide an integer value representing the number of times an image # download must be retried before erroring out. The default value is # zero (no retry on a failed image download).
When set to a positive # integer value, ``swift_store_retry_get_count`` ensures that the # download is attempted this many more times upon a download failure # before sending an error message. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_retry_get_count = 0 # # Time in seconds defining the size of the window in which a new # token may be requested before the current token is due to expire. # # Typically, the Swift storage driver fetches a new token upon the # expiration of the current token to ensure continued access to # Swift. However, some Swift transactions (like uploading image # segments) may not recover well if the token expires on the fly. # # Hence, by fetching a new token before the current token expiration, # we make sure that the token does not expire, or come close to expiring, # before a transaction is attempted. By default, the Swift storage # driver requests a new token 60 seconds or less before the # current token expiration. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_expire_soon_interval = 60 # # Use trusts for multi-tenant Swift store. # # This option instructs the Swift store to create a trust for each # add/get request when the multi-tenant store is in use. Using trusts # allows the Swift store to avoid problems that can be caused by an # authentication token expiring during the upload or download of data. # # By default, ``swift_store_use_trusts`` is set to ``True`` (use of # trusts is enabled). If set to ``False``, a user token is used for # the Swift connection instead, eliminating the overhead of trust # creation. # # NOTE: This option is considered only when # ``swift_store_multi_tenant`` is set to ``True`` # # Possible values: # * True # * False # # Related options: # * swift_store_multi_tenant # # (boolean value) #swift_store_use_trusts = true # # Buffer image segments before upload to Swift. # # Provide a boolean value to indicate whether or not Glance should # buffer image data to disk while uploading to Swift. This enables # Glance to resume uploads on error. # # NOTES: # When enabling this option, one should take great care as this # increases disk usage on the API node. Be aware that depending # upon how the file system is configured, the disk space used # for buffering may decrease the actual disk space available for # the glance image cache. Disk utilization will cap according to # the following equation: # (``swift_store_large_object_chunk_size`` * ``workers`` * 1000) # # Possible values: # * True # * False # # Related options: # * swift_upload_buffer_dir # # (boolean value) #swift_buffer_on_upload = false # # Reference to default Swift account/backing store parameters. # # Provide a string value representing a reference to the default set # of parameters required for using the swift account/backing store for # image storage. The default reference value for this configuration # option is 'ref1'. This configuration option dereferences the # parameters and facilitates image storage in the Swift storage backend # every time a new image is added. # # Possible values: # * A valid string value # # Related options: # * None # # (string value) #default_swift_reference = ref1 # DEPRECATED: Version of the authentication service to use. Valid versions are 2 # and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value) # This option is deprecated for removal.
# Its value may be silently ignored in the future. # Reason: # The option 'auth_version' in the Swift back-end configuration file is # used instead. #swift_store_auth_version = 2 # DEPRECATED: The address where the Swift authentication service is listening. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'auth_address' in the Swift back-end configuration file is # used instead. #swift_store_auth_address = # DEPRECATED: The user to authenticate against the Swift authentication service. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'user' in the Swift back-end configuration file is set instead. #swift_store_user = # DEPRECATED: Auth key for the user authenticating against the Swift # authentication service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'key' in the Swift back-end configuration file is used # to set the authentication key instead. #swift_store_key = # # Absolute path to the file containing the swift account(s) # configurations. # # Include a string value representing the path to a configuration # file that has references for each of the configured Swift # account(s)/backing stores. By default, no file path is specified # and customized Swift referencing is disabled. Configuring this # option is highly recommended while using Swift storage backend for # image storage as it avoids storage of credentials in the database. # # NOTE: Please do not configure this option if you have set # ``swift_store_multi_tenant`` to ``True``. # # Possible values: # * String value representing an absolute path on the glance-api # node # # Related options: # * swift_store_multi_tenant # # (string value) #swift_store_config_file = # # Directory to buffer image segments before upload to Swift. # # Provide a string value representing the absolute path to the # directory on the glance node where image segments will be # buffered briefly before they are uploaded to swift. # # NOTES: # * This is required only when the configuration option # ``swift_buffer_on_upload`` is set to True. # * This directory should be provisioned keeping in mind the # ``swift_store_large_object_chunk_size`` and the maximum # number of images that could be uploaded simultaneously by # a given glance node. # # Possible values: # * String value representing an absolute directory path # # Related options: # * swift_buffer_on_upload # * swift_store_large_object_chunk_size # # (string value) #swift_upload_buffer_dir = # # Address of the ESX/ESXi or vCenter Server target system. # # This configuration option sets the address of the ESX/ESXi or vCenter # Server target system. This option is required when using the VMware # storage backend. The address can contain an IP address (127.0.0.1) or # a DNS name (www.my-domain.com). # # Possible Values: # * A valid IPv4 or IPv6 address # * A valid DNS name # # Related options: # * vmware_server_username # * vmware_server_password # # (host address value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_host = 127.0.0.1 # # Server username. # # This configuration option takes the username for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. 
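# # For example, a deployment might authenticate with a dedicated service # account (hypothetical value): # # vmware_server_username = svc-glance@vsphere.local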
# # Possible Values: # * Any string that is the username for a user with appropriate # privileges # # Related options: # * vmware_server_host # * vmware_server_password # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_username = root # # Server password. # # This configuration option takes the password for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is a password corresponding to the username # specified using the "vmware_server_username" option # # Related options: # * vmware_server_host # * vmware_server_username # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_password = vmware # # The number of VMware API retries. # # This configuration option specifies the number of times the VMware # ESX/VC server API must be retried upon connection related issues or # server API call overload. It is not possible to specify 'retry # forever'. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_api_retry_count = 10 # # Interval in seconds used for polling remote tasks invoked on the VMware # ESX/VC server. # # This configuration option takes in the sleep time in seconds for polling an # ongoing async task as part of the VMware ESX/VC server API call. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_task_poll_interval = 5 # # The directory where the glance images will be stored in the datastore. # # This configuration option specifies the path to the directory where the # glance images will be stored in the VMware datastore. If this option # is not set, the default directory where the glance images are stored # is openstack_glance. # # Possible Values: # * Any string that is a valid path to a directory # # Related options: # * None # # (string value) #vmware_store_image_dir = /openstack_glance # # Set verification of the ESX/vCenter server certificate. # # This configuration option takes a boolean value to determine # whether or not to verify the ESX/vCenter server certificate. If this # option is set to True, the ESX/vCenter server certificate is not # verified. If this option is set to False, then the default CA # truststore is used for verification. # # This option is ignored if the "vmware_ca_file" option is set. In that # case, the ESX/vCenter server certificate will then be verified using # the file specified using the "vmware_ca_file" option. # # Possible Values: # * True # * False # # Related options: # * vmware_ca_file # # (boolean value) # Deprecated group/name - [glance_store]/vmware_api_insecure #vmware_insecure = false # # Absolute path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the ESX/vCenter certificate. # # If this option is set, the "vmware_insecure" option will be ignored # and the CA file specified will be used to authenticate the ESX/vCenter # server certificate and establish a secure connection to the server.
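# # Example (hypothetical path): # # vmware_ca_file = /etc/ssl/certs/vcenter-ca.pem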
# # Possible Values: # * Any string that is a valid absolute path to a CA file # # Related options: # * vmware_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_ca_file = /etc/ssl/certs/ca-certificates.crt # # The datastores where the image can be stored. # # This configuration option specifies the datastores where the image can # be stored in the VMware store backend. This option may be specified # multiple times for specifying multiple datastores. The datastore name # should be specified after its datacenter path, separated by ":". An # optional weight may be given after the datastore name, separated again # by ":" to specify the priority. Thus, the required format becomes # <datacenter_path>:<datastore_name>:<optional_weight>. # # When adding an image, the datastore with the highest weight will be # selected, unless there is not enough free space available in cases # where the image size is already known. If no weight is given, it is # assumed to be zero and the directory will be considered for selection # last. If multiple datastores have the same weight, then the one with # the most free space available is selected. # # Possible Values: # * Any string of the format: # <datacenter_path>:<datastore_name>:<optional_weight> # # Related options: # * None # # (multi valued) #vmware_datastores = [os_brick] # # From os_brick # # Directory to use for os-brick lock files. Defaults to # oslo_concurrency.lock_path which is a sensible default for compute nodes, but # not for HCI deployments or controllers where Glance uses Cinder as a backend, # as locks should use the same directory. (string value) #lock_path = [oslo_policy] # # From oslo.policy # # This option controls whether or not to enforce scope when evaluating policies. # If ``True``, the scope of the token used in the request is compared to the # ``scope_types`` of the policy being enforced. If the scopes do not match, an # ``InvalidScope`` exception will be raised. If ``False``, a message will be # logged informing operators that policies are being invoked with mismatching # scope. (boolean value) #enforce_scope = true # This option controls whether or not to use old deprecated defaults when # evaluating policies. If ``True``, the old deprecated defaults are not going to # be evaluated. This means if any existing token is allowed for old defaults but # is disallowed for new defaults, it will be disallowed. It is encouraged to # enable this flag along with the ``enforce_scope`` flag so that you can get the # benefits of new defaults and ``scope_type`` together. If ``False``, the # deprecated policy check string is logically OR'd with the new policy check # string, allowing for a graceful upgrade experience between releases with new # policies, which is the default behavior. (boolean value) #enforce_new_defaults = true # The relative or absolute path of a file that maps roles to permissions for a # given service. Relative paths must be specified in relation to the # configuration file setting this option. (string value) #policy_file = policy.yaml # Default rule. Enforced when a requested rule is not found. (string value) #policy_default_rule = default # Directories where policy configuration files are stored. They can be relative # to any directory in the search path defined by the config_dir option, or # absolute paths. The file defined by policy_file must exist for these # directories to be searched. Missing or empty directories are ignored.
(multi # valued) #policy_dirs = policy.d # Content Type to send and receive data for REST based policy check (string # value) # Possible values: # application/x-www-form-urlencoded - # application/json - #remote_content_type = application/x-www-form-urlencoded # Server identity verification for REST based policy check (boolean value) #remote_ssl_verify_server_crt = false # Absolute path to CA cert file for REST based policy check (string value) #remote_ssl_ca_crt_file = # Absolute path to client cert for REST based policy check (string value) #remote_ssl_client_crt_file = # Absolute path to client key file for REST based policy check (string value) #remote_ssl_client_key_file = ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/glance-image-import.conf.sample0000664000175000017500000002033100000000000021600 0ustar00zuulzuul00000000000000[DEFAULT] [glance_download_opts] # # From glance # # # Specify metadata prefix to be set on the target image when using # glance-download. All other properties coming from the source image won't be # set on the target image. If specified metadata does not exist on the source # image it won't be set on the target image. Note that you can't set the # os_glance prefix as it is reserved by glance, so the related properties won't # be set on the target image. # # Possible values: # * List containing extra_properties prefixes: ['os_', 'architecture'] # # (list value) #extra_properties = [hw_,trait:,os_distro,os_secure_boot,os_type] [image_conversion] # # From glance # # # Desired output format for image conversion plugin. # # Provide a valid image format to which the conversion plugin # will convert the image before storing it to the back-end. # # Note: if the Image Conversion plugin for image import is defined, users # should only upload disk formats that are supported by `qemu-img`, otherwise # the conversion and import will fail. # # Possible values: # * qcow2 # * raw # * vmdk # # Related Options: # * disk_formats # (string value) # Possible values: # qcow2 - # raw - # vmdk - #output_format = raw [image_import_opts] # # From glance # # # Image import plugins to be enabled for task processing. # # Provide a list of strings referring to the task objects # that should be included in the Image Import flow. The # task objects need to be defined in 'glance/async/ # flows/plugins/*' and may be implemented by the OpenStack # Glance project team, a deployer, or a 3rd party. # # By default, no plugins are enabled, and to take advantage # of the plugin model, the list of plugins must be set # explicitly in the glance-image-import.conf file. # # The allowed value for this option is a comma-separated # list of object names between ``[`` and ``]``. # # Possible values: # * no_op (only logs a debug level message that the # plugin has been executed) # * Any provided Task object name to be included # in the flow. # (list value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #image_import_plugins = [no_op] [import_filtering_opts] # # From glance # # # Specify the "whitelist" of allowed url schemes for web-download. # # This option provides whitelisting of uri schemes that will be allowed when # an end user imports an image using the web-download import method. The # whitelist has priority such that if there is also a blacklist defined for # schemes, the blacklist will be ignored. Host and port filtering, however, # will be applied.
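# # For example, a hypothetical deployment that only permits secure downloads # could set: # # allowed_schemes = [https]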
# # See the Glance Administration Guide for more information. # # Possible values: # * List containing normalized url schemes as they are returned from # urllib.parse. For example ['ftp','https'] # * Hint: leave the whitelist empty if you want the disallowed_schemes # blacklist to be processed # # Related options: # * disallowed_schemes # * allowed_hosts # * disallowed_hosts # * allowed_ports # * disallowed_ports # # (list value) #allowed_schemes = [http,https] # # Specify the "blacklist" of uri schemes disallowed for web-download. # # This option provides blacklisting of uri schemes that will be rejected when # an end user imports an image using the web-download import method. Note # that if a scheme whitelist is defined using the 'allowed_schemes' option, # *this option will be ignored*. Host and port filtering, however, will be # applied. # # See the Glance Administration Guide for more information. # # Possible values: # * List containing normalized url schemes as they are returned from # urllib.parse. For example ['ftp','https'] # * By default the list is empty # # Related options: # * allowed_schemes # * allowed_hosts # * disallowed_hosts # * allowed_ports # * disallowed_ports # # (list value) #disallowed_schemes = [] # # Specify the "whitelist" of allowed target hosts for web-download. # # This option provides whitelisting of hosts that will be allowed when an end # user imports an image using the web-download import method. The whitelist # has priority such that if there is also a blacklist defined for hosts, the # blacklist will be ignored. The uri must have already passed scheme # filtering before this host filter will be applied. If the uri passes, port # filtering will then be applied. # # See the Glance Administration Guide for more information. # # Possible values: # * List containing normalized hostname or ip like it would be returned # in the urllib.parse netloc without the port # * By default the list is empty # * Hint: leave the whitelist empty if you want the disallowed_hosts # blacklist to be processed # # Related options: # * allowed_schemes # * disallowed_schemes # * disallowed_hosts # * allowed_ports # * disallowed_ports # # (list value) #allowed_hosts = [] # # Specify the "blacklist" of hosts disallowed for web-download. # # This option provides blacklisting of hosts that will be rejected when an end # user imports an image using the web-download import method. Note that if a # host whitelist is defined using the 'allowed_hosts' option, *this option # will be ignored*. # # The uri must have already passed scheme filtering before this host filter # will be applied. If the uri passes, port filtering will then be applied. # # See the Glance Administration Guide for more information. # # Possible values: # * List containing normalized hostname or ip like it would be returned # in the urllib.parse netloc without the port # * By default the list is empty # # Related options: # * allowed_schemes # * disallowed_schemes # * allowed_hosts # * allowed_ports # * disallowed_ports # # (list value) #disallowed_hosts = [] # # Specify the "whitelist" of allowed ports for web-download. # # This option provides whitelisting of ports that will be allowed when an end # user imports an image using the web-download import method. The whitelist # has priority such that if there is also a blacklist defined for ports, the # blacklist will be ignored. Note that scheme and host filtering have already # been applied by the time a uri hits the port filter. 
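# # For example, to additionally allow a hypothetical mirror served on port # 8443: # # allowed_ports = [80,443,8443]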
# # See the Glance Administration Guide for more information. # # Possible values: # * List containing ports as they are returned from urllib.parse netloc # field. Thus the value is a list of integer values, for example # [80, 443] # * Hint: leave the whitelist empty if you want the disallowed_ports # blacklist to be processed # # Related options: # * allowed_schemes # * disallowed_schemes # * allowed_hosts # * disallowed_hosts # * disallowed_ports # (list value) #allowed_ports = [80,443] # # Specify the "blacklist" of disallowed ports for web-download. # # This option provides blacklisting of target ports that will be rejected when # an end user imports an image using the web-download import method. Note # that if a port whitelist is defined using the 'allowed_ports' option, *this # option will be ignored*. Note that scheme and host filtering have already # been applied by the time a uri hits the port filter. # # See the Glance Administration Guide for more information. # # Possible values: # * List containing ports as they are returned from urllib.parse netloc # field. Thus the value is a list of integer values, for example # [22, 88] # * By default this list is empty # # Related options: # * allowed_schemes # * disallowed_schemes # * allowed_hosts # * disallowed_hosts # * allowed_ports # # (list value) #disallowed_ports = [] [inject_metadata_properties] # # From glance # # # Specify the names of user roles to be ignored when injecting metadata # properties into the image. # # Possible values: # * List containing user roles. For example: [admin,member] # # (list value) #ignore_user_roles = admin # # Dictionary containing metadata properties to be injected into the image. # # Possible values: # * Dictionary containing key/value pairs. Key length # should be <= 255 characters. For example: k1:v1,k2:v2 # # # (dict value) #inject = ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/glance-manage.conf0000664000175000017500000002360200000000000017162 0ustar00zuulzuul00000000000000[DEFAULT] # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. #debug = false # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging # configuration files are used then all logging configuration is set in the # configuration file and other logging configuration options are ignored (for # example, log-date-format). (string value) # Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. (string # value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default is set, # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # DEPRECATED: Uses a logging handler designed to watch the file system.
When a log file # is moved or removed, this handler will open a new log file with the specified # path instantaneously. It makes sense only if the log_file option is specified # and a Linux platform is used. This option is ignored if log_config_append is # set. (boolean # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This function is known to have been broken for a long time, and # depends on an unmaintained library #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and will be # changed later to honor RFC5424. This option is ignored if log_config_append is # set. (boolean value) #use_syslog = false # Enable journald for logging. If running in a systemd environment you may wish # to enable journal support. Doing so will use the journal native protocol which # includes structured metadata in addition to log messages. This option is # ignored if log_config_append is set. (boolean value) #use_journal = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Use JSON formatting for logging. This option is ignored if log_config_append # is set. (boolean value) #use_json = false # Log output to standard error. This option is ignored if log_config_append is # set. (boolean value) #use_stderr = false # DEPRECATED: Log output to Windows Event Log. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Windows support is no longer maintained. #use_eventlog = false # (Optional) Set the 'color' key according to log levels. This option takes # effect only when logging to stderr or stdout is used. This option is ignored # if log_config_append is set. (boolean value) #log_color = false # The amount of time before the log files are rotated. This option is ignored # unless log_rotation_type is set to "interval". (integer value) #log_rotate_interval = 1 # Rotation interval type. The time of the last file change (or the time when the # service was started) is used when scheduling the next rotation. (string value) # Possible values: # Seconds - # Minutes - # Hours - # Days - # Weekday - # Midnight - #log_rotate_interval_type = days # Maximum number of rotated log files. (integer value) #max_logfile_count = 30 # Log file maximum size in MB. This option is ignored if "log_rotation_type" is # not set to "size". (integer value) #max_logfile_size_mb = 200 # Log rotation type. (string value) # Possible values: # interval - Rotate logs at predefined time intervals. # size - Rotate logs once they reach a predefined size. # none - Do not rotate log files. #log_rotation_type = none # Format string to use for log messages with context. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the message is # DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format.
Used by # oslo_log.formatters.ContextFormatter (string value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. Used by oslo_log.formatters.ContextFormatter # (string value) #logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. (string # value) #instance_uuid_format = "[instance: %(uuid)s] " # Interval, number of seconds, of log rate limiting. (integer value) #rate_limit_interval = 0 # Maximum number of logged messages per rate_limit_interval. (integer value) #rate_limit_burst = 0 # Log level name used by rate limiting. Logs with level greater or equal to # rate_limit_except_level are not filtered. An empty string means that all # levels are filtered. (string value) # Possible values: # CRITICAL - # ERROR - # INFO - # WARNING - # DEBUG - # '' - #rate_limit_except_level = CRITICAL # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [database] # # From oslo.db # # If True, SQLite uses synchronous mode. (boolean value) #sqlite_synchronous = true # The back end to use for the database. (string value) #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. (string # value) #connection = # The SQLAlchemy connection string to use to connect to the slave database. # (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including the # default, overrides any server-set SQL mode. To use whatever SQL mode is set by # the server configuration, set this to no value. Example: mysql_sql_mode= # (string value) #mysql_sql_mode = TRADITIONAL # For Galera only, configure wsrep_sync_wait causality checks on new # connections. Default is None, meaning don't configure any setting. (integer # value) #mysql_wsrep_sync_wait = # Connections which have been present in the connection pool longer than this # number of seconds will be replaced with a new one the next time they are # checked out from the pool. (integer value) #connection_recycle_time = 3600 # Maximum number of SQL connections to keep open in a pool. Setting a value of 0 # indicates no limit. (integer value) #max_pool_size = 5 # Maximum number of database connection retries during startup. Set to -1 to # specify an infinite retry count. (integer value) #max_retries = 10 # Interval between retries of opening a SQL connection. (integer value) #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. 
(integer value) #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer # value) # Minimum value: 0 # Maximum value: 100 #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer value) #pool_timeout = # Enable the experimental use of database reconnect on connection lost. (boolean # value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database operation up to # db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries of a # database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before error is # raised. Set to -1 to specify an infinite retry count. (integer value) #db_max_retries = 20 # Optional URL parameters to append onto the connection URL at connect time; # specify as param1=value1&param2=value2&... (string value) #connection_parameters = ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/glance-policy-generator.conf0000664000175000017500000000010200000000000021203 0ustar00zuulzuul00000000000000[DEFAULT] namespace = glance output_file = etc/policy.yaml.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/glance-scrubber.conf0000664000175000017500000025146500000000000017543 0ustar00zuulzuul00000000000000[DEFAULT] # # From glance.scrubber # # # Secure hashing algorithm used for computing the 'os_hash_value' property. # # This option configures the Glance "multihash", which consists of two # image properties: the 'os_hash_algo' and the 'os_hash_value'. The # 'os_hash_algo' will be populated by the value of this configuration # option, and the 'os_hash_value' will be populated by the hexdigest computed # when the algorithm is applied to the uploaded or imported image data. # # The value must be a valid secure hash algorithm name recognized by the # python 'hashlib' library. You can determine what these are by examining # the 'hashlib.algorithms_available' data member of the version of the # library being used in your Glance installation. For interoperability # purposes, however, we recommend that you use the set of secure hash # names supplied by the 'hashlib.algorithms_guaranteed' data member because # those algorithms are guaranteed to be supported by the 'hashlib' library # on all platforms. Thus, any image consumer using 'hashlib' locally should # be able to verify the 'os_hash_value' of the image. # # The default value of 'sha512' is a performant secure hash algorithm. # # If this option is misconfigured, any attempts to store image data will fail. # For that reason, we recommend using the default value. # # Possible values: # * Any secure hash algorithm name recognized by the Python 'hashlib' # library # # Related options: # * None # # (string value) #hashing_algorithm = sha512 # # Maximum number of image members per image. # # This limits the maximum number of users an image can be shared with. Any # negative value is interpreted as unlimited. # # Related options: # * None # # (integer value) #image_member_quota = 128 # # Maximum number of properties allowed on an image.
# # This enforces an upper limit on the number of additional properties an image # can have. Any negative value is interpreted as unlimited. # # (integer value) #image_property_quota = 128 # # Maximum number of tags allowed on an image. # # Any negative value is interpreted as unlimited. # # Related options: # * None # # (integer value) #image_tag_quota = 128 # # Maximum number of locations allowed on an image. # # Any negative value is interpreted as unlimited. # # Related options: # * None # # (integer value) #image_location_quota = 10 # # The default number of results to return for a request. # # Responses to certain API requests, like list images, may return # multiple items. The number of results returned can be explicitly # controlled by specifying the ``limit`` parameter in the API request. # However, if a ``limit`` parameter is not specified, this # configuration value will be used as the default number of results to # be returned for any API request. # # NOTES: # * The value of this configuration option may not be greater than # the value specified by ``api_limit_max``. # * Setting this to a very large value may slow down database # queries and increase response times. Setting this to a # very low value may result in poor user experience. # # Possible values: # * Any positive integer # # Related options: # * api_limit_max # # (integer value) # Minimum value: 1 #limit_param_default = 25 # # Maximum number of results that could be returned by a request. # # As described in the help text of ``limit_param_default``, some # requests may return multiple results. The number of results to be # returned are governed either by the ``limit`` parameter in the # request or the ``limit_param_default`` configuration option. # The value in either case, can't be greater than the absolute maximum # defined by this configuration option. Anything greater than this # value is trimmed down to the maximum value defined here. # # NOTE: Setting this to a very large value may slow down database # queries and increase response times. Setting this to a # very low value may result in poor user experience. # # Possible values: # * Any positive integer # # Related options: # * limit_param_default # # (integer value) # Minimum value: 1 #api_limit_max = 1000 # # Show direct image location when returning an image. # # This configuration option indicates whether to show the direct image # location when returning image details to the user. The direct image # location is where the image data is stored in backend storage. This # image location is shown under the image property ``direct_url``. # # When multiple image locations exist for an image, the best location # is displayed based on the store weightage assigned for each store # indicated by the configuration option ``weight``. # # NOTES: # * Revealing image locations can present a GRAVE SECURITY RISK as # image locations can sometimes include credentials. Hence, this # is set to ``False`` by default. Set this to ``True`` with # EXTREME CAUTION and ONLY IF you know what you are doing! # * If an operator wishes to avoid showing any image location(s) # to the user, then both this option and # ``show_multiple_locations`` MUST be set to ``False``. # # Possible values: # * True # * False # # Related options: # * show_multiple_locations # * weight # # (boolean value) #show_image_direct_url = false # DEPRECATED: # Show all image locations when returning an image. 
# # This configuration option indicates whether to show all the image # locations when returning image details to the user. When multiple # image locations exist for an image, the locations are ordered based # on the store weightage assigned for each store indicated by the # configuration option ``weight``. The image locations are shown # under the image property ``locations``. # # NOTES: # * Revealing image locations can present a GRAVE SECURITY RISK as # image locations can sometimes include credentials. Hence, this # is set to ``False`` by default. Set this to ``True`` with # EXTREME CAUTION and ONLY IF you know what you are doing! # * See https://wiki.openstack.org/wiki/OSSN/OSSN-0065 for more # information. # * If an operator wishes to avoid showing any image location(s) # to the user, then both this option and # ``show_image_direct_url`` MUST be set to ``False``. # # Possible values: # * True # * False # # Related options: # * show_image_direct_url # * weight # # (boolean value) # This option is deprecated for removal since Newton. # Its value may be silently ignored in the future. # Reason: Use of this option, deprecated since Newton, is a security risk and # will be removed once we figure out a way to satisfy those use cases that # currently require it. An earlier announcement that the same functionality can # be achieved with greater granularity by using policies is incorrect. You # cannot work around this option via policy configuration at the present time, # though that is the direction we believe the fix will take. Please keep an eye # on the Glance release notes to stay up to date on progress in addressing this # issue. #show_multiple_locations = false # # Calculate hash and checksum for the image. # # This configuration option indicates whether the /v2/images/{image_id}/locations # POST API will calculate the hash and checksum of the image on the fly. # If set to False, the hash and checksum calculation is silently skipped. # # Possible values: # * True # * False # (boolean value) #do_secure_hash = true # # The number of times to retry when any operation fails. # (integer value) #http_retries = 3 # # Maximum size of image a user can upload in bytes. # # An image upload greater than the size mentioned here would result # in an image creation failure. This configuration option defaults to # 1099511627776 bytes (1 TiB). # # NOTES: # * This value should only be increased after careful # consideration and must be set less than or equal to # 8 EiB (9223372036854775808). # * This value must be set with careful consideration of the # backend storage capacity. Setting this to a very low value # may result in a large number of image failures, and setting # it to a very large value may result in faster consumption # of storage. Hence, this must be set according to the nature of # images created and storage capacity available. # # Possible values: # * Any positive number less than or equal to 9223372036854775808 # # (integer value) # Minimum value: 1 # Maximum value: 9223372036854775808 #image_size_cap = 1099511627776 # # Maximum amount of image storage per tenant. # # This enforces an upper limit on the cumulative storage consumed by all images # of a tenant across all stores. This is a per-tenant limit. # # The default unit for this configuration option is Bytes. However, storage # units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, # ``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and # TeraBytes respectively.
Note that there should not be any space between the # value and unit. Value ``0`` signifies no quota enforcement. Negative values # are invalid and result in errors. # # This has no effect if ``use_keystone_limits`` is enabled. # # Possible values: # * A string that is a valid concatenation of a non-negative integer # representing the storage value and an optional string literal # representing storage units as mentioned above. # # Related options: # * use_keystone_limits # # (string value) #user_storage_quota = 0 # # Utilize per-tenant resource limits registered in Keystone. # # Enabling this feature will cause Glance to retrieve limits set in keystone # for resource consumption and enforce them against API users. Before turning # this on, the limits need to be registered in Keystone or all quotas will be # considered to be zero, and thus all new resource requests will be rejected. # # These per-tenant resource limits are independent from the static # global ones configured in this config file. If this is enabled, the # relevant static global limits will be ignored. # (boolean value) #use_keystone_limits = false # # Host address of the pydev server. # # Provide a string value representing the hostname or IP of the # pydev server to use for debugging. The pydev server listens for # debug connections on this address, facilitating remote debugging # in Glance. # # Possible values: # * Valid hostname # * Valid IP address # # Related options: # * None # # (host address value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #pydev_worker_debug_host = localhost # # Port number that the pydev server will listen on. # # Provide a port number to bind the pydev server to. The pydev # process accepts debug connections on this port and facilitates # remote debugging in Glance. # # Possible values: # * A valid port number # # Related options: # * None # # (port value) # Minimum value: 0 # Maximum value: 65535 #pydev_worker_debug_port = 5678 # DEPRECATED: # AES key for encrypting store location metadata. # # Provide a string value representing the AES cipher to use for # encrypting Glance store metadata. # # NOTE: The AES key to use must be set to a random string of length # 16, 24 or 32 bytes. # # Possible values: # * String value representing a valid AES key # # Related options: # * None # # (string value) # This option is deprecated for removal since Dalmatian. # Its value may be silently ignored in the future. # Reason: # This option does not serve the purpose of encrypting location metadata; # it encrypts the location URL only for specific APIs. Also, enabling it # during an upgrade may disrupt existing deployments, as it does not # support/provide a db upgrade script to encrypt existing location URLs. # Moreover, its functionality for encrypting location URLs is inconsistent, # which results in download failures. #metadata_encryption_key = # DEPRECATED: # Digest algorithm to use for digital signature. # # Provide a string value representing the digest algorithm to # use for generating digital signatures. By default, ``sha256`` # is used. # # To get a list of the available algorithms supported by the version # of OpenSSL on your platform, run the command: # ``openssl list-message-digest-algorithms``. # Examples are 'sha1', 'sha256', and 'sha512'. # # NOTE: ``digest_algorithm`` is not related to Glance's image signing # and verification.
It is only used to sign the universally unique # identifier (UUID) as a part of the certificate file and key file # validation. # # Possible values: # * An OpenSSL message digest algorithm identifier # # Related options: # * None # # (string value) # This option is deprecated for removal since Dalmatian. # Its value may be silently ignored in the future. # Reason: # This option has had no effect since the removal of native SSL support. #digest_algorithm = sha256 # # The URL that provides the location where the temporary data will be stored # # This option is for Glance internal use only. Glance will save the # image data uploaded by the user to the 'staging' endpoint during the # image import process. # # This option does not change the 'staging' API endpoint by any means. # # NOTE: It is discouraged to use the same path as [task]/work_dir # # NOTE: 'file://' is the only option the # api_image_import flow will support for now. # # NOTE: The staging path must be on a shared filesystem available to all # Glance API nodes. # # Possible values: # * String starting with 'file://' followed by absolute FS path # # Related options: # * [task]/work_dir # # (string value) #node_staging_uri = file:///tmp/staging/ # # List of enabled Image Import Methods # # 'glance-direct', 'copy-image' and 'web-download' are enabled by default. # 'glance-download' is available, but requires federated deployments. # # Related options: # * [DEFAULT]/node_staging_uri (list value) #enabled_import_methods = [glance-direct,web-download,copy-image] # # The URL to this worker. # # If this is set, other glance workers will know how to contact this one # directly if needed. For image import, a single worker stages the image # and other workers need to be able to proxy the import request to the # right one. # # If unset, this will be considered to be `public_endpoint`, which # normally would be set to the same value on all workers, effectively # disabling the proxying behavior. # # Possible values: # * A URL by which this worker is reachable from other workers # # Related options: # * public_endpoint # # (string value) #worker_self_reference_url = # DEPRECATED: # The amount of time, in seconds, to delay image scrubbing. # # When delayed delete is turned on, an image is put into ``pending_delete`` # state upon deletion until the scrubber deletes its image data. Typically, soon # after the image is put into ``pending_delete`` state, it is available for # scrubbing. However, scrubbing can be delayed until a later point using this # configuration option. This option denotes the time period an image spends in # ``pending_delete`` state before it is available for scrubbing. # # It is important to realize that this has storage implications. The larger the # ``scrub_time``, the longer the time to reclaim backend storage from deleted # images. # # Possible values: # * Any non-negative integer # # Related options: # * ``delayed_delete`` # # (integer value) # Minimum value: 0 # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #scrub_time = 0 # DEPRECATED: # The size of the thread pool to be used for scrubbing images. # # When there are a large number of images to scrub, it is beneficial to scrub # images in parallel so that the scrub queue stays under control and the backend # storage is reclaimed in a timely fashion.
This configuration option denotes # the maximum number of images to be scrubbed in parallel. The default value is # one, which signifies serial scrubbing. Any value above one indicates parallel # scrubbing. # # Possible values: # * Any non-zero positive integer # # Related options: # * ``delayed_delete`` # # (integer value) # Minimum value: 1 # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #scrub_pool_size = 1 # DEPRECATED: # Turn on/off delayed delete. # # Typically when an image is deleted, the ``glance-api`` service puts the image # into ``deleted`` state and deletes its data at the same time. Delayed delete # is a feature in Glance that delays the actual deletion of image data until a # later point in time (as determined by the configuration option # ``scrub_time``). # When delayed delete is turned on, the ``glance-api`` service puts the image # into ``pending_delete`` state upon deletion and leaves the image data in the # storage backend for the image scrubber to delete at a later time. The image # scrubber will move the image into ``deleted`` state upon successful deletion # of image data. # # NOTE: When delayed delete is turned on, the image scrubber MUST be running as a # periodic task to prevent the backend storage from filling up with undesired # usage. # # Possible values: # * True # * False # # Related options: # * ``scrub_time`` # * ``wakeup_time`` # * ``scrub_pool_size`` # # (boolean value) # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #delayed_delete = false # DEPRECATED: # Time interval, in seconds, between scrubber runs in daemon mode. # # Scrubber can be run either as a cron job or as a daemon. When run as a daemon, this # configuration option specifies the time period between two runs. When the # scrubber wakes up, it fetches and scrubs all ``pending_delete`` images that # are available for scrubbing after taking ``scrub_time`` into consideration. # # If the wakeup time is set to a large number, there may be a large number of # images to be scrubbed for each run. Also, this impacts how quickly the backend # storage is reclaimed. # # Possible values: # * Any non-negative integer # # Related options: # * ``daemon`` # * ``delayed_delete`` # # (integer value) # Minimum value: 0 # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #wakeup_time = 300 # DEPRECATED: # Run scrubber as a daemon. # # This boolean configuration option indicates whether scrubber should # run as a long-running process that wakes up at regular intervals to # scrub images. The wake up interval can be specified using the # configuration option ``wakeup_time``. # # If this configuration option is set to ``False``, which is the # default value, scrubber runs once to scrub images and exits. In this # case, if the operator wishes to implement continuous scrubbing of # images, scrubber needs to be scheduled as a cron job.
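#
# For illustration only (not part of the generated sample; the schedule and
# file path below are assumptions): with ``daemon`` left at ``False``,
# continuous scrubbing could be implemented with a cron entry along the
# lines of:
#
#   */30 * * * * glance-scrubber --config-file /etc/glance/glance-scrubber.conf
#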
# # Possible values: # * True # * False # # Related options: # * ``wakeup_time`` # # (boolean value) # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #daemon = false # DEPRECATED: # Restore the image status from 'pending_delete' to 'active'. # # This option is used by an administrator to reset the image's status from # 'pending_delete' to 'active' when the image is deleted by mistake and # 'pending delete' feature is enabled in Glance. Please make sure the # glance-scrubber daemon is stopped before restoring the image to avoid image # data inconsistency. # # Possible values: # * image's uuid # # (string value) # This option is deprecated for removal since 2024.1 (Caracal). # Its value may be silently ignored in the future. # Reason: The entire glance scrubber, including this option, is scheduled to be # removed during the 2024.2 (Dalmatian) development cycle. #restore = # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of the default # INFO level. (boolean value) # Note: This option can be changed without restarting. #debug = false # The name of a logging configuration file. This file is appended to any # existing logging configuration files. For details about logging configuration # files, see the Python logging module documentation. Note that when logging # configuration files are used then all logging configuration is set in the # configuration file and other logging configuration options are ignored (for # example, log-date-format). (string value) # Note: This option can be changed without restarting. # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. (string # value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default is set, # logging will go to stderr as defined by use_stderr. This option is ignored if # log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. This option # is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # DEPRECATED: Uses a logging handler designed to watch the file system. When a log file # is moved or removed this handler will open a new log file with the specified path # instantaneously. It makes sense only if the log_file option is specified and the Linux # platform is used. This option is ignored if log_config_append is set. (boolean # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This function is known to have been broken for a long time, and depends # on an unmaintained library #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and will be # changed later to honor RFC5424. This option is ignored if log_config_append is # set. (boolean value) #use_syslog = false # Enable journald for logging. If running in a systemd environment you may wish # to enable journal support. Doing so will use the journal native protocol which # includes structured metadata in addition to log messages. This option is # ignored if log_config_append is set.
(boolean value) #use_journal = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Use JSON formatting for logging. This option is ignored if log_config_append # is set. (boolean value) #use_json = false # Log output to standard error. This option is ignored if log_config_append is # set. (boolean value) #use_stderr = false # DEPRECATED: Log output to Windows Event Log. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: Windows support is no longer maintained. #use_eventlog = false # (Optional) Set the 'color' key according to log levels. This option takes # effect only when logging to stderr or stdout is used. This option is ignored # if log_config_append is set. (boolean value) #log_color = false # The amount of time before the log files are rotated. This option is ignored # unless log_rotation_type is set to "interval". (integer value) #log_rotate_interval = 1 # Rotation interval type. The time of the last file change (or the time when the # service was started) is used when scheduling the next rotation. (string value) # Possible values: # Seconds - # Minutes - # Hours - # Days - # Weekday - # Midnight - #log_rotate_interval_type = days # Maximum number of rotated log files. (integer value) #max_logfile_count = 30 # Log file maximum size in MB. This option is ignored if "log_rotation_type" is # not set to "size". (integer value) #max_logfile_size_mb = 200 # Log rotation type. (string value) # Possible values: # interval - Rotate logs at predefined time intervals. # size - Rotate logs once they reach a predefined size. # none - Do not rotate log files. #log_rotation_type = none # Format string to use for log messages with context. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the message is # DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. Used by # oslo_log.formatters.ContextFormatter (string value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. Used by oslo_log.formatters.ContextFormatter # (string value) #logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is ignored # if log_config_append is set. 
(list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. (string # value) #instance_uuid_format = "[instance: %(uuid)s] " # Interval, number of seconds, of log rate limiting. (integer value) #rate_limit_interval = 0 # Maximum number of logged messages per rate_limit_interval. (integer value) #rate_limit_burst = 0 # Log level name used by rate limiting. Logs with level greater or equal to # rate_limit_except_level are not filtered. An empty string means that all # levels are filtered. (string value) # Possible values: # CRITICAL - # ERROR - # INFO - # WARNING - # DEBUG - # '' - #rate_limit_except_level = CRITICAL # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [database] # # From oslo.db # # If True, SQLite uses synchronous mode. (boolean value) #sqlite_synchronous = true # The back end to use for the database. (string value) #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. (string # value) #connection = # The SQLAlchemy connection string to use to connect to the slave database. # (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including the # default, overrides any server-set SQL mode. To use whatever SQL mode is set by # the server configuration, set this to no value. Example: mysql_sql_mode= # (string value) #mysql_sql_mode = TRADITIONAL # For Galera only, configure wsrep_sync_wait causality checks on new # connections. Default is None, meaning don't configure any setting. (integer # value) #mysql_wsrep_sync_wait = # Connections which have been present in the connection pool longer than this # number of seconds will be replaced with a new one the next time they are # checked out from the pool. (integer value) #connection_recycle_time = 3600 # Maximum number of SQL connections to keep open in a pool. Setting a value of 0 # indicates no limit. (integer value) #max_pool_size = 5 # Maximum number of database connection retries during startup. Set to -1 to # specify an infinite retry count. (integer value) #max_retries = 10 # Interval between retries of opening a SQL connection. (integer value) #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer value) #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. (integer # value) # Minimum value: 0 # Maximum value: 100 #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer value) #pool_timeout = # Enable the experimental use of database reconnect on connection lost. (boolean # value) #use_db_reconnect = false # Seconds between retries of a database transaction. 
(integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database operation up to # db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries of a # database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before error is # raised. Set to -1 to specify an infinite retry count. (integer value) #db_max_retries = 20 # Optional URL parameters to append onto the connection URL at connect time; # specify as param1=value1&param2=value2&... (string value) #connection_parameters = [glance_store] # # From glance.store # # DEPRECATED: # List of enabled Glance stores. # # Register the storage backends to use for storing disk images # as a comma separated list. The default stores enabled for # storing disk images with Glance are ``file`` and ``http``. # # Possible values: # * A comma separated list that could include: # * file # * http # * swift # * rbd # * cinder # * vmware # * s3 # # Related Options: # * default_store # # (list value) # This option is deprecated for removal since Rocky. # Its value may be silently ignored in the future. # Reason: # This option is deprecated against new config option # ``enabled_backends`` which helps to configure multiple backend stores # of different schemes. # # This option is scheduled for removal in the U development # cycle. #stores = file,http # DEPRECATED: # The default scheme to use for storing images. # # Provide a string value representing the default scheme to use for # storing images. If not set, Glance uses ``file`` as the default # scheme to store images with the ``file`` store. # # NOTE: The value given for this configuration option must be a valid # scheme for a store registered with the ``stores`` configuration # option. # # Possible values: # * file # * filesystem # * http # * https # * swift # * swift+http # * swift+https # * swift+config # * rbd # * cinder # * vsphere # * s3 # # Related Options: # * stores # # (string value) # Possible values: # file - # filesystem - # http - # https - # swift - # swift+http - # swift+https - # swift+config - # rbd - # cinder - # vsphere - # s3 - # This option is deprecated for removal since Rocky. # Its value may be silently ignored in the future. # Reason: # This option is deprecated against new config option # ``default_backend`` which acts similar to ``default_store`` config # option. # # This option is scheduled for removal in the U development # cycle. #default_store = file # # Information to match when looking for cinder in the service catalog. # # When the ``cinder_endpoint_template`` is not set and any of # ``cinder_store_auth_address``, ``cinder_store_user_name``, # ``cinder_store_project_name``, ``cinder_store_password`` is not set, # cinder store uses this information to look up the cinder endpoint from the service # catalog in the current context. ``cinder_os_region_name``, if set, is taken # into consideration to fetch the appropriate endpoint. # # The service catalog can be listed by the ``openstack catalog list`` command. # # Possible values: # * A string of the following form: # ``<service_type>:<service_name>:<interface>`` # At least ``service_type`` and ``interface`` should be specified. # ``service_name`` can be omitted.
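#
# Illustrative example (the service name is a hypothetical value): to pin
# the lookup to a ``cinderv3`` catalog entry while keeping the default
# service type and interface:
#
#   cinder_catalog_info = volumev3:cinderv3:publicURL
#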
# # Related options: # * cinder_os_region_name # * cinder_endpoint_template # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_password # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_catalog_info = volumev3::publicURL # # Override service catalog lookup with template for cinder endpoint. # # When this option is set, this value is used to generate the cinder endpoint, # instead of looking up from the service catalog. # This value is ignored if ``cinder_store_auth_address``, # ``cinder_store_user_name``, ``cinder_store_project_name``, and # ``cinder_store_password`` are specified. # # If this configuration option is set, ``cinder_catalog_info`` will be ignored. # # Possible values: # * URL template string for cinder endpoint, where ``%%(tenant)s`` is # replaced with the current tenant (project) name. # For example: ``http://cinder.openstack.example.org/v2/%%(tenant)s`` # # Related options: # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_password # * cinder_store_project_domain_name # * cinder_store_user_domain_name # * cinder_catalog_info # # (string value) #cinder_endpoint_template = # # Region name to look up the cinder service from the service catalog. # # This is used only when ``cinder_catalog_info`` is used for determining the # endpoint. If set, the lookup for the cinder endpoint by this node is filtered to # the specified region. It is useful when multiple regions are listed in the # catalog. If this is not set, the endpoint is looked up from every region. # # Possible values: # * A string that is a valid region name. # # Related options: # * cinder_catalog_info # # (string value) # Deprecated group/name - [glance_store]/os_region_name #cinder_os_region_name = # # Location of a CA certificates file used for cinder client requests. # # The specified CA certificates file, if set, is used to verify cinder # connections via HTTPS endpoint. If the endpoint is HTTP, this value is # ignored. # ``cinder_api_insecure`` must be set to ``True`` to enable the verification. # # Possible values: # * Path to a ca certificates file # # Related options: # * cinder_api_insecure # # (string value) #cinder_ca_certificates_file = # # Number of cinderclient retries on failed http calls. # # When a call fails due to an error, cinderclient will retry the call up to the # specified number of times after sleeping a few seconds. # # Possible values: # * A positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #cinder_http_retries = 3 # # Time period, in seconds, to wait for a cinder volume transition to # complete. # # When the cinder volume is created, deleted, or attached to the glance node to # read/write the volume data, the volume's state is changed. For example, the # newly created volume status changes from ``creating`` to ``available`` after # the creation process is completed. This specifies the maximum time to wait for # the status change. If a timeout occurs while waiting, or the status is changed # to an unexpected value (e.g. ``error``), the image creation fails. # # Possible values: # * A positive integer # # Related options: # * None # # (integer value) # Minimum value: 0 #cinder_state_transition_timeout = 300 # # Allow to perform insecure SSL requests to cinder. # # If this option is set to True, HTTPS endpoint connection is verified using the # CA certificates file specified by the ``cinder_ca_certificates_file`` option.
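#
# Illustrative example (the CA file path is an assumption): to verify HTTPS
# cinder endpoints against a private CA, as described above:
#
#   cinder_api_insecure = true
#   cinder_ca_certificates_file = /etc/glance/cinder-ca.pem
#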
# # Possible values: # * True # * False # # Related options: # * cinder_ca_certificates_file # # (boolean value) #cinder_api_insecure = false # # The address where the cinder authentication service is listening. # # When all of ``cinder_store_auth_address``, ``cinder_store_user_name``, # ``cinder_store_project_name``, and ``cinder_store_password`` options are # specified, the specified values are always used for the authentication. # This is useful to hide the image volumes from users by storing them in a # project/tenant specific to the image service. It also enables users to share # the image volume among other projects under the control of glance's ACL. # # If any of these options is not set, the cinder endpoint is looked up # from the service catalog, and the current context's user and project are used. # # Possible values: # * A valid authentication service address, for example: # ``http://openstack.example.org/identity/v2.0`` # # Related options: # * cinder_store_user_name # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_auth_address = # # User name to authenticate against cinder. # # This must be used with all the following non-domain-related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid user name # # Related options: # * cinder_store_auth_address # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_user_name = # # Domain of the user to authenticate against cinder. # # Possible values: # * A valid domain name for the user specified by ``cinder_store_user_name`` # # Related options: # * cinder_store_auth_address # * cinder_store_password # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_name # # (string value) #cinder_store_user_domain_name = Default # # Password for the user authenticating against cinder. # # This must be used with all the following related options. # If any of these are not specified (except domain-related options), # the user of the current context is used. # # Possible values: # * A valid password for the user specified by ``cinder_store_user_name`` # # Related options: # * cinder_store_auth_address # * cinder_store_user_name # * cinder_store_project_name # * cinder_store_project_domain_name # * cinder_store_user_domain_name # # (string value) #cinder_store_password = # # Project name where the image volume is stored in cinder. # # If this configuration option is not set, the project in the current context is # used. # # This must be used with all the following related options. # If any of these are not specified (except domain-related options), # the project of the current context is used. # # Possible values: # * A valid project name # # Related options: # * ``cinder_store_auth_address`` # * ``cinder_store_user_name`` # * ``cinder_store_password`` # * ``cinder_store_project_domain_name`` # * ``cinder_store_user_domain_name`` # # (string value) #cinder_store_project_name = # # Domain of the project where the image volume is stored in cinder.
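#
# Illustrative example combining the credential options above (all values
# are hypothetical): storing image volumes in a dedicated service project:
#
#   cinder_store_auth_address = http://openstack.example.org/identity
#   cinder_store_user_name = glance
#   cinder_store_password = <secret>
#   cinder_store_project_name = glance-images
#   cinder_store_project_domain_name = Default
#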
# # Possible values: # * A valid domain name of the project specified by # ``cinder_store_project_name`` # # Related options: # * ``cinder_store_auth_address`` # * ``cinder_store_user_name`` # * ``cinder_store_password`` # * ``cinder_store_project_domain_name`` # * ``cinder_store_user_domain_name`` # # (string value) #cinder_store_project_domain_name = Default # # Path to the rootwrap configuration file to use for running commands as root. # # The cinder store requires root privileges to operate the image volumes (for # connecting to iSCSI/FC volumes and reading/writing the volume data, etc.). # The configuration file should allow the commands required by the cinder store and # the os-brick library. # # Possible values: # * Path to the rootwrap config file # # Related options: # * None # # (string value) #rootwrap_config = /etc/glance/rootwrap.conf # # Volume type that will be used for volume creation in cinder. # # Some cinder backends can have several volume types to optimize storage usage. # Adding this option allows an operator to choose a specific volume type # in cinder that can be optimized for images. # # If this is not set, then the default volume type specified in the cinder # configuration will be used for volume creation. # # Possible values: # * A valid volume type from cinder # # Related options: # * None # # NOTE: You cannot use an encrypted volume_type associated with an NFS backend. # An encrypted volume stored on an NFS backend will raise an exception whenever # glance_store tries to write or access image data stored in that volume. # Consult your Cinder administrator to determine an appropriate volume_type. # # (string value) #cinder_volume_type = # # If this is set to True, attachment of volumes for image transfer will # be aborted when multipathd is not running. Otherwise, it will fall back # to single path. # # Possible values: # * True or False # # Related options: # * cinder_use_multipath # # (boolean value) #cinder_enforce_multipath = false # # Flag to identify whether multipath is supported in the deployment. # # Set it to False if multipath is not supported. # # Possible values: # * True or False # # Related options: # * cinder_enforce_multipath # # (boolean value) #cinder_use_multipath = false # # Directory where the NFS volume is mounted on the glance node. # # Possible values: # # * A string representing the absolute path of the mount point. # (string value) #cinder_mount_point_base = /var/lib/glance/mnt # # If this is set to True, glance will perform an extend operation # on the attached volume. Only enable this option if the cinder # backend driver supports the functionality of extending online # (in-use) volumes. Supported from cinder microversion 3.42 and # onwards. By default, it is set to False. # # Possible values: # * True or False # # (boolean value) #cinder_do_extend_attached = false # # Directory to which the filesystem backend store writes images. # # Upon start up, Glance creates the directory if it doesn't already # exist and verifies write access for the user under which # ``glance-api`` runs. If the write access isn't available, a # ``BadStoreConfiguration`` exception is raised and the filesystem # store may not be available for adding new images. # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``.
If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * A valid path to a directory # # Related options: # * ``filesystem_store_datadirs`` # * ``filesystem_store_file_perm`` # # (string value) #filesystem_store_datadir = /var/lib/glance/images # # List of directories and their priorities to which the filesystem # backend store writes images. # # The filesystem store can be configured to store images in multiple # directories as opposed to using a single directory specified by the # ``filesystem_store_datadir`` configuration option. When using # multiple directories, each directory can be given an optional # priority to specify the preference order in which they should # be used. Priority is an integer that is concatenated to the # directory path with a colon where a higher value indicates higher # priority. When two directories have the same priority, the directory # with the most free space is used. When no priority is specified, it # defaults to zero. # # More information on configuring filesystem store with multiple store # directories can be found at # https://docs.openstack.org/glance/latest/configuration/configuring.html # # NOTE: This directory is used only when filesystem store is used as a # storage backend. Either ``filesystem_store_datadir`` or # ``filesystem_store_datadirs`` option must be specified in # ``glance-api.conf``. If both options are specified, a # ``BadStoreConfiguration`` will be raised and the filesystem store # may not be available for adding new images. # # Possible values: # * List of strings of the following form: # * ``<path>:<priority>`` # # Related options: # * ``filesystem_store_datadir`` # * ``filesystem_store_file_perm`` # # (multi valued) #filesystem_store_datadirs = # # Filesystem store metadata file. # # The path to a file which contains the metadata to be returned with any # location # associated with the filesystem store. Once this option is set, it is used for # new images created afterward only - previously existing images are not # affected. # # The file must contain a valid JSON object. The object should contain the keys # ``id`` and ``mountpoint``. The value for both keys should be a string. # # Possible values: # * A valid path to the store metadata file # # Related options: # * None # # (string value) #filesystem_store_metadata_file = # # File access permissions for the image files. # # Set the intended file access permissions for image data. This provides # a way to enable other services, e.g. Nova, to consume images directly # from the filesystem store. The users running the services that are # intended to be given access could be made members of the group # that owns the files created. Assigning a value less than or equal to # zero for this configuration option signifies that no changes will be made # to the default permissions. This value will be decoded as an octal # digit. # # For more information, please refer to the documentation at # https://docs.openstack.org/glance/latest/configuration/configuring.html # # Possible values: # * A valid file access permission # * Zero # * Any negative integer # # Related options: # * None # # (integer value) #filesystem_store_file_perm = 0 # # Chunk size, in bytes. # # The chunk size used when reading or writing image files. Raising this value # may improve the throughput but it may also slightly increase the memory usage # when handling a large number of requests.
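#
# Illustrative example (the value is an assumption, not a recommendation):
# raising the chunk size to 1 MiB to favor throughput over per-request
# memory usage:
#
#   filesystem_store_chunk_size = 1048576
#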
# # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #filesystem_store_chunk_size = 65536 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null byte # sequences to the filesystem; the holes that can appear will automatically # be interpreted by the filesystem as null bytes, and will not actually consume # your storage. # Enabling this feature will also speed up image upload and save network traffic # in addition to saving space in the backend, as null byte sequences are not # sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #filesystem_thin_provisioning = false # # Path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the remote server certificate. If # this option is set, the ``https_insecure`` option will be ignored and # the CA file specified will be used to authenticate the server # certificate and establish a secure connection to the server. # # Possible values: # * A valid path to a CA file # # Related options: # * https_insecure # # (string value) #https_ca_certificates_file = # # Set verification of the remote server certificate. # # This configuration option takes in a boolean value to determine # whether or not to verify the remote server certificate. If set to # True, the remote server certificate is not verified. If the option is # set to False, then the default CA truststore is used for verification. # # This option is ignored if ``https_ca_certificates_file`` is set. # The remote server certificate will then be verified using the file # specified using the ``https_ca_certificates_file`` option. # # Possible values: # * True # * False # # Related options: # * https_ca_certificates_file # # (boolean value) #https_insecure = true # # The http/https proxy information to be used to connect to the remote # server. # # This configuration option specifies the http/https proxy information # that should be used to connect to the remote server. The proxy # information should be a key value pair of the scheme and proxy, for # example, http:10.0.0.1:3128. You can also specify proxies for multiple # schemes by separating the key value pairs with a comma, for example, # http:10.0.0.1:3128, https:10.0.0.1:1080. # # Possible values: # * A comma separated list of scheme:proxy pairs as described above # # Related options: # * None # # (dict value) #http_proxy_information = # # Size, in megabytes, to chunk RADOS images into. # # Provide an integer value representing the size in megabytes to chunk # Glance images into. The default chunk size is 8 megabytes. For optimal # performance, the value should be a power of two. # # When Ceph's RBD object storage system is used as the storage backend # for storing Glance images, the images are chunked into objects of the # size set using this option. These chunked objects are then stored # across the distributed block data store for use by Glance. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #rbd_store_chunk_size = 8 # # RADOS pool in which images are stored. # # When RBD is used as the storage backend for storing Glance images, the # images are stored by means of logical grouping of the objects (chunks # of images) into a ``pool``. Each pool is defined with the number of # placement groups it can contain.
The default pool that is used is # 'images'. # # More information on the RBD storage backend can be found here: # http://ceph.com/planet/how-data-is-stored-in-ceph-cluster/ # # Possible Values: # * A valid pool name # # Related options: # * None # # (string value) #rbd_store_pool = images # # RADOS user to authenticate as. # # This configuration option takes in the RADOS user to authenticate as. # This is only needed when RADOS authentication is enabled and is # applicable only if the user is using Cephx authentication. If the # value for this option is not set by the user or is set to None, a # default value will be chosen, which will be based on the ``client.`` # section in ``rbd_store_ceph_conf``. # # Possible Values: # * A valid RADOS user # # Related options: # * rbd_store_ceph_conf # # (string value) #rbd_store_user = # # Ceph configuration file path. # # This configuration option specifies the path to the Ceph configuration # file to be used. If the value for this option is not set by the user # or is set to the empty string, librados will read the standard ceph.conf # file by searching the default Ceph configuration file locations in # sequential order. See the Ceph documentation for details. # # NOTE: If using Cephx authentication, this file should include a reference # to the right keyring in a ``client.`` section # # NOTE 2: If you leave this option empty (the default), the actual Ceph # configuration file used may change depending on what version of librados # is being used. If it is important for you to know exactly which configuration # file is in effect, you may specify that file here using this option. # # Possible Values: # * A valid path to a configuration file # # Related options: # * rbd_store_user # # (string value) #rbd_store_ceph_conf = # # Timeout value for connecting to Ceph cluster. # # This configuration option takes in the timeout value in seconds used # when connecting to the Ceph cluster i.e. it sets the time to wait for # glance-api before closing the connection. This prevents glance-api # hangups during the connection to RBD. If the value for this option # is set to less than 0, no timeout is set and the default librados value # is used. # # Possible Values: # * Any integer value # # Related options: # * None # # (integer value) #rados_connect_timeout = -1 # # Enable or disable thin provisioning in this backend. # # This configuration option enables the feature of not actually writing null byte # sequences to the RBD backend; the holes that can appear will automatically # be interpreted by Ceph as null bytes, and will not actually consume your storage. # Enabling this feature will also speed up image upload and save network traffic # in addition to saving space in the backend, as null byte sequences are not # sent over the network. # # Possible Values: # * True # * False # # Related options: # * None # # (boolean value) #rbd_thin_provisioning = false # # The host where the S3 server is listening. # # This configuration option sets the host of the S3 or S3 compatible storage # server. This option is required when using the S3 storage backend. # The host can contain a DNS name (e.g. s3.amazonaws.com, my-object-storage.com) # or an IP address (127.0.0.1). # # Possible values: # * A valid DNS name # * A valid IPv4 address # # Related Options: # * s3_store_access_key # * s3_store_secret_key # # (string value) #s3_store_host = # # The S3 region name. # # This parameter will set the region_name used by boto.
# If this parameter is not set, we will try to compute it from the # s3_store_host. # # Possible values: # * A valid region name # # Related Options: # * s3_store_host # # (string value) #s3_store_region_name = # # The S3 query token access key. # # This configuration option takes the access key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is the access key for a user with appropriate # privileges # # Related Options: # * s3_store_host # * s3_store_secret_key # # (string value) #s3_store_access_key = # # The S3 query token secret key. # # This configuration option takes the secret key for authenticating with the # Amazon S3 or S3 compatible storage server. This option is required when using # the S3 storage backend. # # Possible values: # * Any string value that is a secret key corresponding to the access key # specified using the ``s3_store_host`` option # # Related Options: # * s3_store_host # * s3_store_access_key # # (string value) #s3_store_secret_key = # # The S3 bucket to be used to store the Glance data. # # This configuration option specifies where the glance images will be stored # in S3. If ``s3_store_create_bucket_on_put`` is set to true, the bucket will be # created automatically if it does not exist. # # Possible values: # * Any string value # # Related Options: # * s3_store_create_bucket_on_put # * s3_store_bucket_url_format # # (string value) #s3_store_bucket = # # Determine whether S3 should create a new bucket. # # This configuration option takes a boolean value to indicate whether Glance # should # create a new bucket in S3 if it does not exist. # # Possible values: # * Any Boolean value # # Related Options: # * None # # (boolean value) #s3_store_create_bucket_on_put = false # # The S3 calling format used to determine the object. # # This configuration option takes the access model that is used to specify the # address of an object in an S3 bucket. # # NOTE: # In ``path``-style, the endpoint for the object looks like # 'https://s3.amazonaws.com/bucket/example.img'. # And in ``virtual``-style, the endpoint for the object looks like # 'https://bucket.s3.amazonaws.com/example.img'. # If you do not follow the DNS naming convention in the bucket name, you can # get objects in the path style, but not in the virtual style. # # Possible values: # * Any string value of ``auto``, ``virtual``, or ``path`` # # Related Options: # * s3_store_bucket # # (string value) #s3_store_bucket_url_format = auto # # The size, in MB, at which S3 should start chunking image files and do a multipart # upload in S3. # # This configuration option takes a threshold in MB to determine whether to # upload the image to S3 as is or to split it (Multipart Upload). # # Note: An image can be split into a maximum of 10,000 parts. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_chunk_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_size = 100 # # The multipart upload part size, in MB, that S3 should use when uploading parts. # # This configuration option takes the image split size in MB for Multipart # Upload. # # Note: An image can be split into a maximum of 10,000 parts.
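#
# Illustrative example (the values are assumptions): with
# ``s3_store_large_object_size = 100`` and a part size of 50 MB, a 600 MB
# image exceeds the threshold and is uploaded as a multipart upload of
# twelve 50 MB parts:
#
#   s3_store_large_object_chunk_size = 50
#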
# # Possible values: # * Any positive integer value (must be greater than or equal to 5 MB) # # Related Options: # * s3_store_large_object_size # * s3_store_thread_pools # # (integer value) #s3_store_large_object_chunk_size = 10 # # The number of thread pools to perform a multipart upload in S3. # # This configuration option takes the number of thread pools when performing a # Multipart Upload. # # Possible values: # * Any positive integer value # # Related Options: # * s3_store_large_object_size # * s3_store_large_object_chunk_size # # (integer value) #s3_store_thread_pools = 10 # # Set verification of the server certificate. # # This boolean determines whether or not to verify the server # certificate. If this option is set to True, swiftclient won't check # for a valid SSL certificate when authenticating. If the option is set # to False, then the default CA truststore is used for verification. # # Possible values: # * True # * False # # Related options: # * swift_store_cacert # # (boolean value) #swift_store_auth_insecure = false # # Path to the CA bundle file. # # This configuration option enables the operator to specify the path to # a custom Certificate Authority file for SSL verification when # connecting to Swift. # # Possible values: # * A valid path to a CA file # # Related options: # * swift_store_auth_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_cacert = /etc/ssl/certs/ca-certificates.crt # # The region of the Swift endpoint to be used by Glance. # # Provide a string value representing a Swift region where Glance # can connect to for image storage. By default, there is no region # set. # # When Glance uses Swift as the storage backend to store images # for a specific tenant that has multiple endpoints, setting a # Swift region with ``swift_store_region`` allows Glance to connect # to Swift in the specified region, as opposed to single-region # connectivity. # # This option can be configured for both single-tenant and # multi-tenant storage. # # NOTE: Setting the region with ``swift_store_region`` is # tenant-specific and is necessary ``only if`` the tenant has # multiple endpoints across different regions. # # Possible values: # * A string value representing a valid Swift region. # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_region = RegionTwo # # The URL endpoint to use for Swift backend storage. # # Provide a string value representing the URL endpoint to use for # storing Glance images in Swift store. By default, an endpoint # is not set and the storage URL returned by ``auth`` is used. # Setting an endpoint with ``swift_store_endpoint`` overrides the # storage URL and is used for Glance image storage. # # NOTE: The URL should include the path up to, but excluding the # container. The location of an object is obtained by appending # the container and object to the configured URL. # # Possible values: # * String value representing a valid URL path up to a Swift container # # Related Options: # * None # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #swift_store_endpoint = https://swift.openstack.example.org/v1/path_not_including_container_name # # Endpoint Type of Swift service.
# # This string value indicates the endpoint type to use to fetch the # Swift endpoint. The endpoint type determines the actions the user will # be allowed to perform, for instance, reading and writing to the Store. # This setting is only used if swift_store_auth_version is greater than # 1. # # Possible values: # * publicURL # * adminURL # * internalURL # # Related options: # * swift_store_endpoint # # (string value) # Possible values: # publicURL - # adminURL - # internalURL - #swift_store_endpoint_type = publicURL # # Type of Swift service to use. # # Provide a string value representing the service type to use for # storing images while using Swift backend storage. The default # service type is set to ``object-store``. # # NOTE: If ``swift_store_auth_version`` is set to 2, the value for # this configuration option needs to be ``object-store``. If using # a higher version of Keystone or a different auth scheme, this # option may be modified. # # Possible values: # * A string representing a valid service type for Swift storage. # # Related Options: # * None # # (string value) #swift_store_service_type = object-store # # Name of a single container to store images/name prefix for multiple containers # # When a single container is being used to store images, this configuration # option indicates the container within the Glance account to be used for # storing all images. When multiple containers are used to store images, this # will be the name prefix for all containers. Usage of single/multiple # containers can be controlled using the configuration option # ``swift_store_multiple_containers_seed``. # # When using multiple containers, the containers will be named after the value # set for this configuration option with the first N chars of the image UUID # as the suffix delimited by an underscore (where N is specified by # ``swift_store_multiple_containers_seed``). # # Example: if the seed is set to 3 and swift_store_container = ``glance``, then # an image with UUID ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in # the container ``glance_fda``. All dashes in the UUID are included when # creating the container name but do not count toward the character limit, so # when N=10 the container name would be ``glance_fdae39a1-ba``. # # Possible values: # * If using a single container, this configuration option can be any string # that is a valid swift container name in Glance's Swift account # * If using multiple containers, this configuration option can be any # string as long as it satisfies the container naming rules enforced by # Swift. The value of ``swift_store_multiple_containers_seed`` should be # taken into account as well. # # Related options: # * ``swift_store_multiple_containers_seed`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (string value) #swift_store_container = glance # # The size threshold, in MB, after which Glance will start segmenting image # data. # # Swift has an upper limit on the size of a single uploaded object. By default, # this is 5GB. To upload objects bigger than this limit, objects are segmented # into multiple smaller objects that are tied together with a manifest file. # For more detail, refer to # https://docs.openstack.org/swift/latest/overview_large_objects.html # # This configuration option specifies the size threshold over which the Swift # driver will start segmenting image data into multiple smaller files. # Currently, the Swift driver only supports creating Dynamic Large Objects.
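#
# Worked example using the default values below (a 5120 MB threshold and
# 200 MB segments): a 6144 MB image exceeds the threshold and is uploaded
# as 31 segments, thirty of 200 MB and one of 144 MB.
#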
# # NOTE: This should be set by taking into account the large object limit # enforced by the Swift cluster under consideration. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster under consideration. # # Related options: # * ``swift_store_large_object_chunk_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_size = 5120 # # The maximum size, in MB, of the segments when image data is segmented. # # When image data is segmented to upload images that are larger than the limit # enforced by the Swift cluster, image data is broken into segments that are no # bigger than the size specified by this configuration option. # Refer to ``swift_store_large_object_size`` for more detail. # # For example: if ``swift_store_large_object_size`` is 5GB and # ``swift_store_large_object_chunk_size`` is 1GB, an image of size 6.2GB will be # segmented into 7 segments where the first six segments will be 1GB in size and # the seventh segment will be 0.2GB. # # Possible values: # * A positive integer that is less than or equal to the large object limit # enforced by the Swift cluster under consideration. # # Related options: # * ``swift_store_large_object_size`` # # (integer value) # Minimum value: 1 #swift_store_large_object_chunk_size = 200 # # Create container, if it doesn't already exist, when uploading image. # # At the time of uploading an image, if the corresponding container doesn't # exist, it will be created provided this configuration option is set to True. # By default, it won't be created. This behavior is applicable for both single # and multiple containers mode. # # Possible values: # * True # * False # # Related options: # * None # # (boolean value) #swift_store_create_container_on_put = false # # Store images in tenant's Swift account. # # This enables multi-tenant storage mode which causes Glance images to be stored # in tenant specific Swift accounts. If this is disabled, Glance stores all # images in its own account. More details about the multi-tenant store can be # found at # https://wiki.openstack.org/wiki/GlanceSwiftTenantSpecificStorage # # NOTE: If using the multi-tenant swift store, please make sure # that you do not set a swift configuration file with the # 'swift_store_config_file' option. # # Possible values: # * True # * False # # Related options: # * swift_store_config_file # # (boolean value) #swift_store_multi_tenant = false # # Seed indicating the number of containers to use for storing images. # # When using a single-tenant store, images can be stored in one or more # containers. When set to 0, all images will be stored in a single container. # When set to an integer value between 1 and 32, multiple containers will be # used to store images. This configuration option determines how many # containers are created. The total number of containers that will be used is # equal to 16^N, so if this config option is set to 2, then 16^2=256 containers # will be used to store images. # # Please refer to ``swift_store_container`` for more detail on the naming # convention. More detail about using multiple containers can be found at # https://specs.openstack.org/openstack/glance-specs/specs/kilo/swift-store- # multiple-containers.html # # NOTE: This is used only when swift_store_multi_tenant is disabled.
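#
# As an illustration (the values below are hypothetical), a seed of 2 with
# the default container name spreads images across 16^2 = 256 containers
# named ``glance_00`` through ``glance_ff``:
#
#   swift_store_container = glance
#   swift_store_multiple_containers_seed = 2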
# # Possible values: # * A non-negative integer less than or equal to 32 # # Related options: # * ``swift_store_container`` # * ``swift_store_multi_tenant`` # * ``swift_store_create_container_on_put`` # # (integer value) # Minimum value: 0 # Maximum value: 32 #swift_store_multiple_containers_seed = 0 # # List of tenants that will be granted admin access. # # This is a list of tenants that will be granted read/write access to # all Swift containers created by Glance in multi-tenant mode. The # default value is an empty list. # # Possible values: # * A comma-separated list of strings representing UUIDs of Keystone # projects/tenants # # Related options: # * None # # (list value) #swift_store_admin_tenants = # # SSL layer compression for HTTPS Swift requests. # # Provide a boolean value to determine whether or not to compress # HTTPS Swift requests for images at the SSL layer. By default, # compression is enabled. # # When using Swift as the backend store for Glance image storage, # SSL layer compression of HTTPS Swift requests can be set using # this option. If set to False, SSL layer compression of HTTPS # Swift requests is disabled. Disabling this option may improve # performance for images which are already in a compressed format, # for example, qcow2. # # Possible values: # * True # * False # # Related Options: # * None # # (boolean value) #swift_store_ssl_compression = true # # The number of times a Swift download will be retried before the # request fails. # # Provide an integer value representing the number of times an image # download must be retried before erroring out. The default value is # zero (no retry on a failed image download). When set to a positive # integer value, ``swift_store_retry_get_count`` ensures that the # download is attempted this many more times upon a download failure # before sending an error message. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_retry_get_count = 0 # # Time in seconds defining the size of the window in which a new # token may be requested before the current token is due to expire. # # Typically, the Swift storage driver fetches a new token upon the # expiration of the current token to ensure continued access to # Swift. However, some Swift transactions (like uploading image # segments) may not recover well if the token expires on the fly. # # Hence, by fetching a new token before the current token expires, # we make sure that the token is not expired, or close to expiring, # when a transaction is attempted. By default, the Swift storage # driver requests a new token 60 seconds or less before the # current token expiration. # # Possible values: # * Zero # * Positive integer value # # Related Options: # * None # # (integer value) # Minimum value: 0 #swift_store_expire_soon_interval = 60 # # Use trusts for multi-tenant Swift store. # # This option instructs the Swift store to create a trust for each # add/get request when the multi-tenant store is in use. Using trusts # allows the Swift store to avoid problems that can be caused by an # authentication token expiring during the upload or download of data. # # By default, ``swift_store_use_trusts`` is set to ``True`` (use of # trusts is enabled). If set to ``False``, a user token is used for # the Swift connection instead, eliminating the overhead of trust # creation.
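#
# As an illustration (the values below are hypothetical), a multi-tenant
# deployment that prefers plain user tokens over trusts would set:
#
#   swift_store_multi_tenant = true
#   swift_store_use_trusts = false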
# # NOTE: This option is considered only when # ``swift_store_multi_tenant`` is set to ``True`` # # Possible values: # * True # * False # # Related options: # * swift_store_multi_tenant # # (boolean value) #swift_store_use_trusts = true # # Buffer image segments before upload to Swift. # # Provide a boolean value to indicate whether or not Glance should # buffer image data to disk while uploading to swift. This enables # Glance to resume uploads on error. # # NOTES: # When enabling this option, one should take great care as this # increases disk usage on the API node. Be aware that depending # upon how the file system is configured, the disk space used # for buffering may decrease the actual disk space available for # the glance image cache. Disk utilization will cap according to # the following equation: # (``swift_store_large_object_chunk_size`` * ``workers`` * 1000) # # Possible values: # * True # * False # # Related options: # * swift_upload_buffer_dir # # (boolean value) #swift_buffer_on_upload = false # # Reference to default Swift account/backing store parameters. # # Provide a string value representing a reference to the default set # of parameters required for using swift account/backing store for # image storage. The default reference value for this configuration # option is 'ref1'. This configuration option dereferences the # parameters and facilitates image storage in Swift storage backend # every time a new image is added. # # Possible values: # * A valid string value # # Related options: # * None # # (string value) #default_swift_reference = ref1 # DEPRECATED: Version of the authentication service to use. Valid versions are 2 # and 3 for keystone and 1 (deprecated) for swauth and rackspace. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'auth_version' in the Swift back-end configuration file is # used instead. #swift_store_auth_version = 2 # DEPRECATED: The address where the Swift authentication service is listening. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'auth_address' in the Swift back-end configuration file is # used instead. #swift_store_auth_address = # DEPRECATED: The user to authenticate against the Swift authentication service. # (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'user' in the Swift back-end configuration file is set instead. #swift_store_user = # DEPRECATED: Auth key for the user authenticating against the Swift # authentication service. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: # The option 'key' in the Swift back-end configuration file is used # to set the authentication key instead. #swift_store_key = # # Absolute path to the file containing the swift account(s) # configurations. # # Include a string value representing the path to a configuration # file that has references for each of the configured Swift # account(s)/backing stores. By default, no file path is specified # and customized Swift referencing is disabled. Configuring this # option is highly recommended while using Swift storage backend for # image storage as it avoids storage of credentials in the database. # # NOTE: Please do not configure this option if you have set # ``swift_store_multi_tenant`` to ``True``. 
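#
# As an illustration (the path below is hypothetical), credentials can be
# kept out of the database by pointing Glance at a reference file and
# selecting the default reference:
#
#   swift_store_config_file = /etc/glance/glance-swift.conf
#   default_swift_reference = ref1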
# # Possible values: # * String value representing an absolute path on the glance-api # node # # Related options: # * swift_store_multi_tenant # # (string value) #swift_store_config_file = # # Directory to buffer image segments before upload to Swift. # # Provide a string value representing the absolute path to the # directory on the glance node where image segments will be # buffered briefly before they are uploaded to swift. # # NOTES: # * This is required only when the configuration option # ``swift_buffer_on_upload`` is set to True. # * This directory should be provisioned keeping in mind the # ``swift_store_large_object_chunk_size`` and the maximum # number of images that could be uploaded simultaneously by # a given glance node. # # Possible values: # * String value representing an absolute directory path # # Related options: # * swift_buffer_on_upload # * swift_store_large_object_chunk_size # # (string value) #swift_upload_buffer_dir = # # Address of the ESX/ESXi or vCenter Server target system. # # This configuration option sets the address of the ESX/ESXi or vCenter # Server target system. This option is required when using the VMware # storage backend. The address can contain an IP address (127.0.0.1) or # a DNS name (www.my-domain.com). # # Possible Values: # * A valid IPv4 or IPv6 address # * A valid DNS name # # Related options: # * vmware_server_username # * vmware_server_password # # (host address value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_host = 127.0.0.1 # # Server username. # # This configuration option takes the username for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is the username for a user with appropriate # privileges # # Related options: # * vmware_server_host # * vmware_server_password # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_username = root # # Server password. # # This configuration option takes the password for authenticating with # the VMware ESX/ESXi or vCenter Server. This option is required when # using the VMware storage backend. # # Possible Values: # * Any string that is a password corresponding to the username # specified using the "vmware_server_username" option # # Related options: # * vmware_server_host # * vmware_server_username # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_server_password = vmware # # The number of VMware API retries. # # This configuration option specifies the number of times the VMware # ESX/VC server API must be retried upon connection related issues or # server API call overload. It is not possible to specify 'retry # forever'. # # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_api_retry_count = 10 # # Interval in seconds used for polling remote tasks invoked on VMware # ESX/VC server. # # This configuration option takes in the sleep time in seconds for polling an # on-going async task as part of the VMWare ESX/VC server API call. 
# # Possible Values: # * Any positive integer value # # Related options: # * None # # (integer value) # Minimum value: 1 #vmware_task_poll_interval = 5 # # The directory where the glance images will be stored in the datastore. # # This configuration option specifies the path to the directory where the # glance images will be stored in the VMware datastore. If this option # is not set, the default directory where the glance images are stored # is openstack_glance. # # Possible Values: # * Any string that is a valid path to a directory # # Related options: # * None # # (string value) #vmware_store_image_dir = /openstack_glance # # Set verification of the ESX/vCenter server certificate. # # This configuration option takes a boolean value to determine # whether or not to verify the ESX/vCenter server certificate. If this # option is set to True, the ESX/vCenter server certificate is not # verified. If this option is set to False, then the default CA # truststore is used for verification. # # This option is ignored if the "vmware_ca_file" option is set. In that # case, the ESX/vCenter server certificate will then be verified using # the file specified using the "vmware_ca_file" option. # # Possible Values: # * True # * False # # Related options: # * vmware_ca_file # # (boolean value) # Deprecated group/name - [glance_store]/vmware_api_insecure #vmware_insecure = false # # Absolute path to the CA bundle file. # # This configuration option enables the operator to use a custom # Certificate Authority file to verify the ESX/vCenter certificate. # # If this option is set, the "vmware_insecure" option will be ignored # and the CA file specified will be used to authenticate the ESX/vCenter # server certificate and establish a secure connection to the server. # # Possible Values: # * Any string that is a valid absolute path to a CA file # # Related options: # * vmware_insecure # # (string value) # # This option has a sample default set, which means that # its actual default value may vary from the one documented # below. #vmware_ca_file = /etc/ssl/certs/ca-certificates.crt # # The datastores where the image can be stored. # # This configuration option specifies the datastores where the image can # be stored in the VMware store backend. This option may be specified # multiple times for specifying multiple datastores. The datastore name # should be specified after its datacenter path, separated by ":". An # optional weight may be given after the datastore name, separated again # by ":" to specify the priority. Thus, the required format becomes # <datacenter_path>:<datastore_name>:<optional_weight>. # # When adding an image, the datastore with the highest weight will be # selected, unless there is not enough free space available in cases # where the image size is already known. If no weight is given, it is # assumed to be zero and the directory will be considered for selection # last. If multiple datastores have the same weight, then the one with # the most free space available is selected. # # Possible Values: # * Any string of the format: # <datacenter_path>:<datastore_name>:<optional_weight> # # Related options: # * None # # (multi valued) #vmware_datastores = [os_brick] # # From os_brick # # Directory to use for os-brick lock files. Defaults to # oslo_concurrency.lock_path which is a sensible default for compute nodes, but # not for HCI deployments or controllers where Glance uses Cinder as a backend, # as locks should use the same directory. (string value) #lock_path = [oslo_concurrency] # # From oslo.concurrency # # Enables or disables inter-process locks.
(boolean value) #disable_process_locking = false # Directory to use for lock files. For security, the specified directory should # only be writable by the user running the processes that need locking. Defaults # to environment variable OSLO_LOCK_PATH. If external locks are used, a lock # path must be set. (string value) #lock_path = [oslo_policy] # # From oslo.policy # # This option controls whether or not to enforce scope when evaluating policies. # If ``True``, the scope of the token used in the request is compared to the # ``scope_types`` of the policy being enforced. If the scopes do not match, an # ``InvalidScope`` exception will be raised. If ``False``, a message will be # logged informing operators that policies are being invoked with mismatching # scope. (boolean value) #enforce_scope = true # This option controls whether or not to use old deprecated defaults when # evaluating policies. If ``True``, the old deprecated defaults are not going to # be evaluated. This means if any existing token is allowed for old defaults but # is disallowed for new defaults, it will be disallowed. It is encouraged to # enable this flag along with the ``enforce_scope`` flag so that you can get the # benefits of new defaults and ``scope_type`` together. If ``False``, the # deprecated policy check string is logically OR'd with the new policy check # string, allowing for a graceful upgrade experience between releases with new # policies, which is the default behavior. (boolean value) #enforce_new_defaults = true # The relative or absolute path of a file that maps roles to permissions for a # given service. Relative paths must be specified in relation to the # configuration file setting this option. (string value) #policy_file = policy.yaml # Default rule. Enforced when a requested rule is not found. (string value) #policy_default_rule = default # Directories where policy configuration files are stored. They can be relative # to any directory in the search path defined by the config_dir option, or # absolute paths. The file defined by policy_file must exist for these # directories to be searched. Missing or empty directories are ignored. (multi # valued) #policy_dirs = policy.d # Content Type to send and receive data for REST based policy check (string # value) # Possible values: # * application/x-www-form-urlencoded # * application/json #remote_content_type = application/x-www-form-urlencoded # Server identity verification for REST based policy check (boolean value) #remote_ssl_verify_server_crt = false # Absolute path to CA cert file for REST based policy check (string value) #remote_ssl_ca_crt_file = # Absolute path to client cert for REST based policy check (string value) #remote_ssl_client_crt_file = # Absolute path to client key file for REST based policy check (string value) #remote_ssl_client_key_file =
glance-29.0.0/etc/glance-swift.conf.sample
# glance-swift.conf.sample # # This file is an example config file to use when # multiple swift accounts/backing stores are enabled. # # Specify the reference name in [] # For each section, specify the auth_address, user and key.
# # WARNING: # * If any of auth_address, user or key is not specified, # the glance-api's swift store will fail to configure [ref1] user = tenant:user1 key = key1 auth_version = 2 auth_address = http://localhost:5000/v2.0 [ref2] user = project_name:user_name2 key = key2 user_domain_id = default project_domain_id = default auth_version = 3 auth_address = http://localhost:5000/v3 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8383007 glance-29.0.0/etc/metadefs/0000775000175000017500000000000000000000000015421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/README0000664000175000017500000000041600000000000016302 0ustar00zuulzuul00000000000000This directory contains predefined namespaces for Glance Metadata Definitions Catalog. Files from this directory can be loaded into the database using db_load_metadefs command for glance-manage. Similarly you can unload the definitions using db_unload_metadefs command. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/cim-processor-allocation-setting-data.json0000664000175000017500000001210600000000000025606 0ustar00zuulzuul00000000000000{ "namespace": "CIM::ProcessorAllocationSettingData", "display_name": "CIM Processor Allocation Setting", "description": "Properties related to the resource allocation settings of a processor (CPU) from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim). These are properties that identify processor setting data and may be specified to volume, image, host aggregate, flavor and Nova server as scheduler hint. 
For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_ProcessorAllocationSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Cinder::Volume", "prefix": "CIM_PASD_", "properties_target": "image" }, { "name": "OS::Glance::Image", "prefix": "CIM_PASD_" }, { "name": "OS::Nova::Aggregate", "prefix": "CIM_PASD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_PASD_" }, { "name": "OS::Nova::Server", "properties_target": "scheduler_hints" } ], "properties": { "InstructionSet": { "title": "Instruction Set", "description": "Identifies the instruction set of the processor within a processor architecture.", "operators": [""], "type": "string", "enum": [ "x86:i386", "x86:i486", "x86:i586", "x86:i686", "x86:64", "IA-64:IA-64", "AS/400:TIMI", "Power:Power_2.03", "Power:Power_2.04", "Power:Power_2.05", "Power:Power_2.06", "S/390:ESA/390", "S/390:z/Architecture", "S/390:z/Architecture_2", "PA-RISC:PA-RISC_1.0", "PA-RISC:PA-RISC_2.0", "ARM:A32", "ARM:A64", "MIPS:MIPS_I", "MIPS:MIPS_II", "MIPS:MIPS_III", "MIPS:MIPS_IV", "MIPS:MIPS_V", "MIPS:MIPS32", "MIPS64:MIPS64", "Alpha:Alpha", "SPARC:SPARC_V7", "SPARC:SPARC_V8", "SPARC:SPARC_V9", "SPARC:SPARC_JPS1", "SPARC:UltraSPARC2005", "SPARC:UltraSPARC2007", "68k:68000", "68k:68010", "68k:68020", "68k:68030", "68k:68040", "68k:68060" ] }, "ProcessorArchitecture": { "title": "Processor Architecture", "description": "Identifies the processor architecture of the processor.", "operators": [""], "type": "string", "enum": [ "x86", "IA-64", "AS/400", "Power", "S/390", "PA-RISC", "ARM", "MIPS", "Alpha", "SPARC", "68k" ] }, "InstructionSetExtensionName": { "title": "Instruction Set Extension", "description": "Identifies the instruction set extensions of the processor within a processor architecture.", "operators": ["", ""], "type": "array", "items": { "type": "string", "enum": [ "x86:3DNow", "x86:3DNowExt", "x86:ABM", "x86:AES", "x86:AVX", "x86:AVX2", "x86:BMI", "x86:CX16", "x86:F16C", "x86:FSGSBASE", "x86:LWP", "x86:MMX", "x86:PCLMUL", "x86:RDRND", "x86:SSE2", "x86:SSE3", "x86:SSSE3", "x86:SSE4A", "x86:SSE41", "x86:SSE42", "x86:FMA3", "x86:FMA4", "x86:XOP", "x86:TBM", "x86:VT-d", "x86:VT-x", "x86:EPT", "x86:SVM", "PA-RISC:MAX", "PA-RISC:MAX2", "ARM:DSP", "ARM:Jazelle-DBX", "ARM:Thumb", "ARM:Thumb-2", "ARM:ThumbEE)", "ARM:VFP", "ARM:NEON", "ARM:TrustZone", "MIPS:MDMX", "MIPS:MIPS-3D", "Alpha:BWX", "Alpha:FIX", "Alpha:CIX", "Alpha:MVI" ] } } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/cim-resource-allocation-setting-data.json0000664000175000017500000001561300000000000025424 0ustar00zuulzuul00000000000000{ "namespace": "CIM::ResourceAllocationSettingData", "display_name": "CIM Resource Allocation Setting Data", "description": "Properties from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim) that represent settings specifically related to an allocated resource that are outside the scope of the CIM class typically used to represent the resource itself. These properties may be specified to volume, host aggregate and flavor. 
For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_ResourceAllocationSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Cinder::Volume", "prefix": "CIM_RASD_", "properties_target": "image" }, { "name": "OS::Nova::Aggregate", "prefix": "CIM_RASD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_RASD_" } ], "properties": { "Address": { "title": "Address", "description": "The address of the resource.", "type": "string" }, "AddressOnParent": { "title": "Address On Parent", "description": "Describes the address of this resource in the context of the Parent.", "type": "string" }, "AllocationUnits": { "title": "Allocation Units", "description": "This property specifies the units of allocation used by the Reservation and Limit properties.", "type": "string" }, "AutomaticAllocation": { "title": "Automatic Allocation", "description": "This property specifies if the resource will be automatically allocated.", "type": "boolean" }, "AutomaticDeallocation": { "title": "Automatic Deallocation", "description": "This property specifies if the resource will be automatically de-allocated.", "type": "boolean" }, "ConsumerVisibility": { "title": "Consumer Visibility", "description": "Describes the consumers visibility to the allocated resource.", "operators": [""], "type": "string", "enum": [ "Unknown", "Passed-Through", "Virtualized", "Not represented", "DMTF reserved", "Vendor Reserved" ] }, "Limit": { "title": "Limit", "description": "This property specifies the upper bound, or maximum amount of resource that will be granted for this allocation.", "type": "string" }, "MappingBehavior": { "title": "Mapping Behavior", "description": "Specifies how this resource maps to underlying resources. 
If the HostResource array contains any entries, this property reflects how the resource maps to those specific resources.", "operators": [""], "type": "string", "enum": [ "Unknown", "Not Supported", "Dedicated", "Soft Affinity", "Hard Affinity", "DMTF Reserved", "Vendor Reserved" ] }, "OtherResourceType": { "title": "Other Resource Type", "description": "A string that describes the resource type when a well defined value is not available and ResourceType has the value 'Other'.", "type": "string" }, "Parent": { "title": "Parent", "description": "The Parent of the resource.", "type": "string" }, "PoolID": { "title": "Pool ID", "description": "This property specifies which ResourcePool the resource is currently allocated from, or which ResourcePool the resource will be allocated from when the allocation occurs.", "type": "string" }, "Reservation": { "title": "Reservation", "description": "This property specifies the amount of resource guaranteed to be available for this allocation.", "type": "string" }, "ResourceSubType": { "title": "Resource Sub Type", "description": "A string describing an implementation specific sub-type for this resource.", "type": "string" }, "ResourceType": { "title": "Resource Type", "description": "The type of resource this allocation setting represents.", "operators": [""], "type": "string", "enum": [ "Other", "Computer System", "Processor", "Memory", "IDE Controller", "Parallel SCSI HBA", "FC HBA", "iSCSI HBA", "IB HCA", "Ethernet Adapter", "Other Network Adapter", "I/O Slot", "I/O Device", "Floppy Drive", "CD Drive", "DVD drive", "Disk Drive", "Tape Drive", "Storage Extent", "Other storage device", "Serial port", "Parallel port", "USB Controller", "Graphics controller", "IEEE 1394 Controller", "Partitionable Unit", "Base Partitionable Unit", "Power", "Cooling Capacity", "Ethernet Switch Port", "Logical Disk", "Storage Volume", "Ethernet Connection", "DMTF reserved", "Vendor Reserved" ] }, "VirtualQuantity": { "title": "Virtual Quantity", "description": "This property specifies the quantity of resources presented to the consumer.", "type": "string" }, "VirtualQuantityUnits": { "title": "Virtual Quantity Units", "description": "This property specifies the units used by the VirtualQuantity property.", "type": "string" }, "Weight": { "title": "Weight", "description": "This property specifies a relative priority for this allocation in relation to other allocations from the same ResourcePool.", "type": "string" }, "Connection": { "title": "Connection", "description": "The thing to which this resource is connected.", "type": "string" }, "HostResource": { "title": "Host Resource", "description": "This property exposes specific assignment of resources.", "type": "string" } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/cim-storage-allocation-setting-data.json0000664000175000017500000001206000000000000025232 0ustar00zuulzuul00000000000000{ "namespace": "CIM::StorageAllocationSettingData", "display_name": "CIM Storage Allocation Setting Data", "description": "Properties related to the allocation of virtual storage from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim). These properties may be specified to volume, host aggregate and flavor. 
For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_StorageAllocationSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Cinder::Volume", "prefix": "CIM_SASD_" }, { "name": "OS::Nova::Aggregate", "prefix": "CIM_SASD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_SASD_" } ], "properties": { "Access": { "title": "Access", "description": "Access describes whether the allocated storage extent is 1 (readable), 2 (writeable), or 3 (both).", "operators": [""], "type": "string", "enum": [ "Unknown", "Readable", "Writeable", "Read/Write Supported", "DMTF Reserved" ] }, "HostExtentName": { "title": "Host Extent Name", "description": "A unique identifier for the host extent.", "type": "string" }, "HostExtentNameFormat": { "title": "Host Extent Name Format", "description": "The HostExtentNameFormat property identifies the format that is used for the value of the HostExtentName property.", "operators": [""], "type": "string", "enum": [ "Unknown", "Other", "SNVM", "NAA", "EUI64", "T10VID", "OS Device Name", "DMTF Reserved" ] }, "HostExtentNameNamespace": { "title": "Host Extent Name Namespace", "description": "If the host extent is a SCSI volume, then the preferred source for SCSI volume names is SCSI VPD Page 83 responses.", "operators": [""], "type": "string", "enum": [ "Unknown", "Other", "VPD83Type3", "VPD83Type2", "VPD83Type1", "VPD80", "NodeWWN", "SNVM", "OS Device Namespace", "DMTF Reserved" ] }, "HostExtentStartingAddress": { "title": "Host Extent Starting Address", "description": "The HostExtentStartingAddress property identifies the starting address on the host storage extent identified by the value of the HostExtentName property that is used for the allocation of the virtual storage extent.", "type": "string" }, "HostResourceBlockSize": { "title": "Host Resource Block Size", "description": "Size in bytes of the blocks that are allocated at the host as the result of this storage resource allocation or storage resource allocation request.", "type": "string" }, "Limit": { "title": "Limit", "description": "The maximum amount of blocks that will be granted for this storage resource allocation at the host.", "type": "string" }, "OtherHostExtentNameFormat": { "title": "Other Host Extent Name Format", "description": "A string describing the format of the HostExtentName property if the value of the HostExtentNameFormat property is 1 (Other).", "type": "string" }, "OtherHostExtentNameNamespace": { "title": "Other Host Extent Name Namespace", "description": "A string describing the namespace of the HostExtentName property if the value of the HostExtentNameNamespace matches 1 (Other).", "type": "string" }, "Reservation": { "title": "Reservation", "description": "The amount of blocks that are guaranteed to be available for this storage resource allocation at the host.", "type": "string" }, "VirtualQuantity": { "title": "Virtual Quantity", "description": "Number of blocks that are presented to the consumer.", "type": "string" }, "VirtualQuantityUnits": { "title": "Virtual Quantity Units", "description": "This property specifies the units used by the VirtualQuantity property.", "type": "string" }, "VirtualResourceBlockSize": { "title": "Virtual Resource Block Size", "description": "Size in bytes of the blocks that are presented to the consumer as the result of this storage resource allocation or storage resource allocation request.", "type": "string" } }, "objects": [] } 
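The namespace definitions in this directory can be loaded into, and removed
from, the Glance database with glance-manage, as noted in the metadefs README
above. A minimal usage sketch (the path is illustrative and depends on where
the files are installed):

    glance-manage db_load_metadefs /etc/glance/metadefs/
    glance-manage db_unload_metadefs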
glance-29.0.0/etc/metadefs/cim-virtual-system-setting-data.json
{ "namespace": "CIM::VirtualSystemSettingData", "display_name": "CIM Virtual System Setting Data", "description": "A set of virtualization specific properties from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim), which define the virtual aspects of a virtual system. These properties may be specified to host aggregate and flavor. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_VirtualSystemSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Aggregate", "prefix": "CIM_VSSD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_VSSD_" } ], "properties": { "AutomaticRecoveryAction": { "title": "Automatic Recovery Action", "description": "Action to take for the virtual system when the software executed by the virtual system fails.", "operators": ["<or>"], "type": "string", "enum": [ "None", "Restart", "Revert to snapshot", "DMTF Reserved" ] }, "AutomaticShutdownAction": { "title": "Automatic Shutdown Action", "description": "Action to take for the virtual system when the host is shut down.", "operators": ["<or>"], "type": "string", "enum": [ "Turn Off", "Save state", "Shutdown", "DMTF Reserved" ] }, "AutomaticStartupAction": { "title": "Automatic Startup Action", "description": "Action to take for the virtual system when the host is started.", "operators": ["<or>"], "type": "string", "enum": [ "None", "Restart if previously active", "Always startup", "DMTF Reserved" ] }, "AutomaticStartupActionDelay": { "title": "Automatic Startup Action Delay", "description": "Delay applicable to startup action.", "type": "string" }, "AutomaticStartupActionSequenceNumber": { "title": "Automatic Startup Action Sequence Number", "description": "Number indicating the relative sequence of virtual system activation when the host system is started.", "type": "string" }, "ConfigurationDataRoot": { "title": "Configuration Data Root", "description": "Filepath of a directory where information about the virtual system configuration is stored.", "type": "string" }, "ConfigurationFile": { "title": "Configuration File", "description": "Filepath of a file where information about the virtual system configuration is stored.", "type": "string" }, "ConfigurationID": { "title": "Configuration ID", "description": "Unique id of the virtual system configuration.", "type": "string" }, "CreationTime": { "title": "Creation Time", "description": "Time when the virtual system configuration was created.", "type": "string" }, "LogDataRoot": { "title": "Log Data Root", "description": "Filepath of a directory where log information about the virtual system is stored.", "type": "string" }, "RecoveryFile": { "title": "Recovery File", "description": "Filepath of a file where recovery related information of the virtual system is stored.", "type": "string" }, "SnapshotDataRoot": { "title": "Snapshot Data Root", "description": "Filepath of a directory where information about virtual system snapshots is stored.", "type": "string" }, "SuspendDataRoot": { "title": "Suspend Data Root", "description": "Filepath of a directory where suspend related information about the virtual system is stored.", "type": "string" }, "SwapFileDataRoot": { "title": "Swap File Data Root", "description": "Filepath
of a directory where swapfiles of the virtual system are stored.", "type": "string" }, "VirtualSystemIdentifier": { "title": "Virtual System Identifier", "description": "VirtualSystemIdentifier shall reflect a unique name for the system as it is used within the virtualization platform.", "type": "string" }, "VirtualSystemType": { "title": "Virtual System Type", "description": "VirtualSystemType shall reflect a particular type of virtual system.", "type": "string" }, "Notes": { "title": "Notes", "description": "End-user supplied notes that are related to the virtual system.", "type": "string" } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-aggr-disk-filter.json0000664000175000017500000000210200000000000023114 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::AggregateDiskFilter", "display_name": "Disk Allocation per Host", "description": "Properties related to the Nova scheduler filter AggregateDiskFilter. Filters aggregate hosts based on the available disk space compared to the requested disk space. Hosts in the aggregate with not enough usable disk will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Aggregate" } ], "properties": { "disk_allocation_ratio": { "title": "Disk Subscription Ratio", "description": "Allows the host to be under and over subscribed for the amount of disk space requested for an instance. A ratio greater than 1.0 allows for over subscription (hosts may have less usable disk space than requested). A ratio less than 1.0 allows for under subscription.", "type": "number", "readonly": false } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-aggr-iops-filter.json0000664000175000017500000000202300000000000023136 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::AggregateIoOpsFilter", "display_name": "IO Ops per Host", "description": "Properties related to the Nova scheduler filter AggregateIoOpsFilter. Filters aggregate hosts based on the number of instances currently changing state. Hosts in the aggregate with too many instances changing state will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Aggregate" } ], "properties": { "max_io_ops_per_host": { "title": "Maximum IO Operations per Host", "description": "Prevents hosts in the aggregate that have this many or more instances currently in build, resize, snapshot, migrate, rescue or unshelve to be scheduled for new instances.", "type": "integer", "readonly": false, "default": 8, "minimum": 1 } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-aggr-num-instances.json0000664000175000017500000000160100000000000023466 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::AggregateNumInstancesFilter", "display_name": "Instances per Host", "description": "Properties related to the Nova scheduler filter AggregateNumInstancesFilter. Filters aggregate hosts by the number of running instances on it. Hosts in the aggregate with too many instances will be filtered out. 
The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", "protected": false, "resource_type_associations": [ { "name": "OS::Nova::Aggregate" } ], "properties": { "max_instances_per_host": { "title": "Max Instances Per Host", "description": "Maximum number of instances allowed to run on a host in the aggregate.", "type": "integer", "readonly": false, "minimum": 0 } }, "objects": [] }
glance-29.0.0/etc/metadefs/compute-cpu-mode.json
{ "namespace": "OS::Compute::CPUMode", "display_name": "CPU Mode", "description": "This provides the preferred CPU Model to be used when booting up a guest VM.", "visibility": "public", "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "cpu_mode": { "title": "CPU Mode", "description": "Type of CPU Mode.", "type": "string", "enum": [ "none", "host-model", "host-passthrough", "custom" ] } } }
glance-29.0.0/etc/metadefs/compute-cpu-pinning.json
{ "namespace": "OS::Compute::CPUPinning", "display_name": "CPU Pinning", "description": "This provides the preferred CPU pinning and CPU thread pinning policy to be used when pinning vCPU of the guest to pCPU of the host. See http://docs.openstack.org/admin-guide/compute-numa-cpu-pinning.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "cpu_policy": { "title": "CPU Pinning Policy", "description": "Type of CPU pinning policy.", "type": "string", "enum": [ "shared", "dedicated" ] }, "cpu_thread_policy": { "title": "CPU Thread Pinning Policy", "description": "Type of CPU thread pinning policy.", "type": "string", "enum": [ "isolate", "prefer", "require" ] } } }
glance-29.0.0/etc/metadefs/compute-guest-memory-backing.json
{ "namespace": "OS::Compute::GuestMemoryBacking", "display_name": "Guest Memory Backing", "description": "This provides the preferred backing option for guest RAM. Guest's memory can be backed by hugepages to limit TLB lookups. See also: https://wiki.openstack.org/wiki/VirtDriverGuestCPUMemoryPlacement", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor", "prefix": "hw:" }, { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" } ], "properties": { "mem_page_size": { "title": "Size of memory page", "description": "Page size to be used for Guest memory backing. Value can be specified as <number><unit> (i.e.: 2MB, 1GB) or 'any', 'small', 'large'.
If this property is set in Image metadata then only 'any' and 'large' values are accepted in Flavor metadata by Nova API.", "type": "string" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-guest-shutdown.json0000664000175000017500000000166000000000000022771 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::GuestShutdownBehavior", "display_name": "Shutdown Behavior", "description": "These properties allow modifying the shutdown behavior for stop, rescue, resize, and shelve operations.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "os_shutdown_timeout": { "title": "Shutdown timeout", "description": "By default, guests will be given 60 seconds to perform a graceful shutdown. After that, the VM is powered off. This property allows overriding the amount of time (unit: seconds) to allow a guest OS to cleanly shut down before power off. A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up.", "type": "integer", "minimum": 0 } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-host-capabilities.json0000664000175000017500000002176600000000000023406 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::HostCapabilities", "display_name": "Compute Host Capabilities", "description": "Hardware capabilities provided by the compute host. This provides the ability to fine tune the hardware specification required when an instance is requested. The ComputeCapabilitiesFilter should be enabled in the Nova scheduler to use these properties. When enabled, this filter checks that the capabilities provided by the compute host satisfy any extra specifications requested. Only hosts that can provide the requested capabilities will be eligible for hosting the instance.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor", "prefix": "capabilities:" }, { "name": "OS::Nova::Aggregate", "prefix": "aggregate_instance_extra_specs:" } ], "properties": { "cpu_info:vendor": { "title": "Vendor", "description": "Specifies the CPU manufacturer.", "operators": [""], "type": "string", "enum": [ "Intel", "AMD" ] }, "cpu_info:model": { "title": "Model", "description": "Specifies the CPU model. Use this property to ensure that your vm runs on a specific cpu model.", "operators": [""], "type": "string", "enum": [ "Conroe", "Core2Duo", "Penryn", "Nehalem", "Westmere", "SandyBridge", "IvyBridge", "Haswell", "Broadwell", "Delhi", "Seoul", "Abu Dhabi", "Interlagos", "Kabini", "Valencia", "Zurich", "Budapest", "Barcelona", "Suzuka", "Shanghai", "Istanbul", "Lisbon", "Magny-Cours", "Cortex-A57", "Cortex-A53", "Cortex-A12", "Cortex-A17", "Cortex-A15", "Coretx-A7", "X-Gene" ] }, "cpu_info:arch": { "title": "Architecture", "description": "Specifies the CPU architecture. 
Use this property to specify the architecture supported by the hypervisor.", "operators": [""], "type": "string", "enum": [ "x86", "x86_64", "i686", "ia64", "ARMv8-A", "ARMv7-A" ] }, "cpu_info:topology:cores": { "title": "cores", "description": "Number of cores.", "type": "integer", "readonly": false, "default": 1 }, "cpu_info:topology:threads": { "title": "threads", "description": "Number of threads.", "type": "integer", "readonly": false, "default": 1 }, "cpu_info:topology:sockets": { "title": "sockets", "description": "Number of sockets.", "type": "integer", "readonly": false, "default": 1 }, "cpu_info:features": { "title": "Features", "description": "Specifies CPU flags/features. Using this property you can specify the required set of instructions supported by a vm.", "operators": ["", ""], "type": "array", "items": { "type": "string", "enum": [ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "pn", "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", "syscall", "mp", "nx", "mmxext", "fxsr_opt", "pdpe1gb", "rdtscp", "lm", "3dnowext", "3dnow", "arch_perfmon", "pebs", "bts", "rep_good", "nopl", "xtopology", "tsc_reliable", "nonstop_tsc", "extd_apicid", "amd_dcm", "aperfmperf", "eagerfpu", "nonstop_tsc_s3", "pni", "pclmulqdq", "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2", "ssse3", "cid", "fma", "cx16", "xtpr", "pdcm", "pcid", "dca", "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt", "tsc_deadline_timer", "aes", "xsave", "avx", "f16c", "rdrand", "hypervisor", "rng", "rng_en", "ace", "ace_en", "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy", "abm", "sse4a", "misalignsse", "3dnowprefetch", "osvw", "ibs", "xop", "skinit", "wdt", "lwp", "fma4", "tce", "nodeid_msr", "tbm", "topoext", "perfctr_core", "perfctr_nb", "bpext", "perfctr_l2", "mwaitx", "ida", "arat", "cpb", "epb", "pln", "pts", "dtherm", "hw_pstate", "proc_feedback", "hwp", "hwp_notify", "hwp_act_window", "hwp_epp", "hwp_pkg_req", "intel_pt", "tpr_shadow", "vnmi", "flexpriority", "ept", "vpid", "npt", "lbrv", "svm_lock", "nrip_save", "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists", "pausefilter", "pfthreshold", "vmmcall", "fsgsbase", "tsc_adjust", "bmi1", "hle", "avx2", "smep", "bmi2", "erms", "invpcid", "rtm", "cqm", "mpx", "avx512f", "rdseed", "adx", "smap", "pcommit", "clflushopt", "clwb", "avx512pf", "avx512er", "avx512cd", "sha_ni", "xsaveopt", "xsavec", "xgetbv1", "xsaves", "cqm_llc", "cqm_occup_llc", "clzero" ] } } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-hypervisor.json0000664000175000017500000000433500000000000022205 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::Hypervisor", "display_name": "Hypervisor Selection", "description": "OpenStack Compute supports many hypervisors, although most installations use only one hypervisor. For installations with multiple supported hypervisors, you can schedule different hypervisors using the ImagePropertiesFilter. 
This filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "hypervisor_type": { "title": "Hypervisor Type", "description": "Hypervisor type required by the image. Used with the ImagePropertiesFilter. \n\n Baremetal - physical provisioning. hyperv - Microsoft® hyperv. KVM - Kernel-based Virtual Machine. LXC - Linux Containers (through libvirt). powervm - IBM® PowerVM®. QEMU - Quick EMUlator. UML - User Mode Linux. vmware - VMware® vsphere. VZ - Virtuozzo OS Containers and Virtual Machines (through libvirt). For more information, see: https://docs.openstack.org/nova/latest/admin/configuration/hypervisors.html", "type": "string", "enum": [ "baremetal", "hyperv", "kvm", "lxc", "powervm", "qemu", "uml", "vmware", "vz", "xen" ] }, "vm_mode": { "title": "VM Mode", "description": "The virtual machine mode. This represents the host/guest ABI (application binary interface) used for the virtual machine. Used with the ImagePropertiesFilter. \n\n hvm — Fully virtualized - This is the virtual machine mode (vm_mode) used by QEMU and KVM. \n\n xen - Xen 3.0 paravirtualized. \n\n uml — User Mode Linux paravirtualized. \n\n exe — Executables in containers. This is the mode used by LXC.", "type": "string", "enum": [ "hvm", "xen", "uml", "exe" ] } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-instance-data.json0000664000175000017500000000350500000000000022504 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::InstanceData", "display_name": "Instance Config Data", "description": "Instances can perform self-configuration based on data made available to the running instance. These properties affect instance configuration.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" } ], "properties": { "img_config_drive": { "title": "Config Drive", "description": "This property specifies whether or not Nova should use a config drive when booting the image. Mandatory means that Nova will always use a config drive when booting the image. OpenStack can be configured to write metadata to a special configuration drive that will be attached to the instance when it boots. The instance can retrieve any information from the config drive. One use case for the config drive is to pass network configuration information to the instance. See also: http://docs.openstack.org/user-guide/cli_config_drive.html", "type": "string", "enum": [ "optional", "mandatory" ] }, "os_require_quiesce": { "title": "Require Quiescent File system", "description": "This property specifies whether or not the filesystem must be quiesced during snapshot processing. 
For volume backed and image backed snapshots, yes means that snapshotting is aborted when quiescing fails, whereas, no means quiescing will be skipped and snapshot processing will continue after the quiesce failure.", "type": "string", "enum": [ "yes", "no" ] } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-libvirt-image.json0000664000175000017500000001740600000000000022531 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::LibvirtImage", "display_name": "libvirt Driver Options for Images", "description": "The libvirt Compute Driver Options for Glance Images. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "hw_cdrom_bus": { "title": "CD-ROM Bus", "description": "Specifies the type of disk controller to attach CD-ROM devices to.", "type": "string", "enum": [ "scsi", "virtio", "uml", "xen", "ide", "usb", "fdc", "sata", "lxc" ] }, "hw_disk_bus": { "title": "Disk Bus", "description": "Specifies the type of disk controller to attach disk devices to.", "type": "string", "enum": [ "scsi", "virtio", "uml", "xen", "ide", "usb", "fdc", "sata", "lxc" ] }, "hw_firmware_type": { "title": "Firmware Type", "description": "Specifies whether the image should be booted with a legacy BIOS or with UEFI.", "type": "string", "enum": [ "bios", "uefi" ] }, "hw_firmware_stateless": { "title": "Stateless Firmware", "description": "Specifies whether the image should be booted with only read-only firmware image. If true, firmware configurations do not persist over server reboot. Note stateless firmware is only supported when UEFI is used.", "type": "boolean" }, "hw_rng_model": { "title": "Random Number Generator Device", "description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: The generator device is disabled. /dev/random is used as the default entropy source. To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng", "type": "string", "default": "virtio" }, "hw_machine_type": { "title": "Machine Type", "description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).", "type": "string" }, "hw_scsi_model": { "title": "SCSI Model", "description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.", "type": "string", "default": "virtio-scsi" }, "hw_video_model": { "title": "Video Model", "description": "The graphic device model presented to the guest. 
hw_video_model=none disables the graphics device in the guest and should generally be used when using gpu passthrough.", "type": "string", "enum": [ "vga", "cirrus", "vmvga", "xen", "qxl", "virtio", "gop", "none", "bochs" ] }, "hw_video_ram": { "title": "Max Video Ram", "description": "Maximum RAM (unit: MB) for the video image. Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.", "type": "integer", "minimum": 0 }, "os_command_line": { "title": "Kernel Command Line", "description": "The kernel command line to be used by the libvirt driver, instead of the default. For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).", "type": "string" }, "os_type": { "title": "OS Type", "description": "The operating system installed on the image. The libvirt driver contains logic that takes different actions depending on the value of the os_type parameter of the image. For example, for os_type=windows images, it creates a FAT32-based swap partition instead of a Linux swap partition, and it limits the injected host name to less than 16 characters.", "type": "string", "enum": [ "linux", "windows" ] }, "hw_vif_model": { "title": "Virtual Network Interface", "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor configuration. libvirt driver options: KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, spapr-vlan, virtio, e1000e and vmxnet3. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.", "type": "string", "enum": [ "e1000", "e1000e", "ne2k_pci", "netfront", "pcnet", "rtl8139", "spapr-vlan", "virtio", "vmxnet3" ] }, "hw_vif_multiqueue_enabled": { "title": "Multiqueue Enabled", "description": "If true, this enables the virtio-net multiqueue feature. In this case, the driver sets the number of queues equal to the number of guest vCPUs. This makes the network performance scale across a number of vCPUs.", "type": "string", "enum": ["true", "false"] }, "hw_pmu": { "title": "Virtual Performance Monitoring Unit", "description": "Controls emulation of a vPMU in the guest. To reduce latency in realtime workloads disable the vPMU by setting hw_pmu=false", "type": "string", "enum": ["true", "false"] }, "hw_qemu_guest_agent": { "title": "QEMU Guest Agent", "description": "This is a background process which helps management applications execute guest OS level commands. For example, freezing and thawing filesystems, entering suspend. However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.", "type": "string", "enum": ["yes", "no"] }, "hw_pointer_model": { "title": "Pointer Model", "description": "Input devices allow interaction with a graphical framebuffer. For example to provide a graphic tablet for absolute cursor movement. 
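As an illustration (an editorial sketch, not part of the metadef file itself): properties from this OS::Compute::LibvirtImage namespace are ordinarily attached to a Glance image as image properties. A minimal sketch using openstacksdk, assuming a recent SDK release, a clouds.yaml entry named "devstack", and an image named "my-guest-image" (both names are hypothetical):

    import openstack

    # Connect using credentials from clouds.yaml; the cloud name is an assumption.
    conn = openstack.connect(cloud="devstack")
    image = conn.image.find_image("my-guest-image")
    # Recent openstacksdk releases store unrecognized keyword arguments as
    # image properties, so metadef keys can be passed directly.
    conn.image.update_image(
        image,
        hw_disk_bus="scsi",           # attach disks through a SCSI controller
        hw_scsi_model="virtio-scsi",  # use the para-virtualized SCSI model
        hw_qemu_guest_agent="yes",    # expose a QEMU guest agent channel
    )

The same result can be had from the command line with "openstack image set --property hw_disk_bus=scsi my-guest-image".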
Currently only supported by the KVM/QEMU hypervisor configuration and VNC or SPICE consoles must be enabled.", "type": "string", "enum": ["usbtablet"] }, "img_hide_hypervisor_id": { "title": "Hide Hypervisor ID", "description": "Enables hiding the host hypervisor signature in the guest OS.", "type": "string", "enum": ["yes", "no"] } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-libvirt.json0000664000175000017500000000464000000000000021445 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::Libvirt", "display_name": "libvirt Driver Options", "description": "The libvirt compute driver options. \n\nThese are properties that affect the libvirt compute driver and may be specified on flavors and images. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "serial_port_count": { "title": "Serial Port Count", "description": "Specifies the count of serial ports that should be provided. If hw:serial_port_count is not set in the flavor's extra_specs, then any count is permitted. If hw:serial_port_count is set, then this provides the default serial port count. It is permitted to override the default serial port count, but only with a lower value.", "type": "integer", "minimum": 0 }, "boot_menu": { "title": "Boot Menu", "description": "If true, enables the BIOS bootmenu. In cases where both the image metadata and Extra Spec are set, the Extra Spec setting is used. This allows for flexibility in setting/overriding the default behavior as needed.", "type": "string", "enum": ["true", "false"] }, "mem_encryption": { "title": "Hardware Memory Encryption", "description": "Enables encryption of guest memory at the hardware level, if there are compute hosts available which support this. See https://docs.openstack.org/nova/latest/admin/configuration/hypervisor-kvm.html#amd-sev-secure-encrypted-virtualization for details.", "type": "string", "enum": ["true", "false"] }, "virtio_packed_ring": { "title": "Virtio Packed Ring", "description": "Enables the packed virtio queue feature. When set to true, the instance will be scheduled to hosts that support negotiating the packed virtio queue format. This feature may or may not be enabled depending on the guest driver. When used, it improves small-packet network I/O performance.", "type": "string", "enum": ["true", "false"] } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-quota.json0000664000175000017500000001604400000000000021124 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::Quota", "display_name": "Flavor Quota", "description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "objects": [ { "name": "CPU Limits", "description": "You can configure the CPU limits with control parameters.", "properties": { "quota:cpu_shares": { "title": "Quota: CPU Shares", "description": "Specifies the proportional weighted share for the domain. 
If this element is omitted, the service falls back to the OS-provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "type": "integer" }, "quota:cpu_period": { "title": "Quota: CPU Period", "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", "type": "integer", "minimum": 1000, "maximum": 1000000 }, "quota:cpu_quota": { "title": "Quota: CPU Quota", "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", "type": "integer" } } }, { "name": "Disk QoS", "description": "Using disk I/O quotas, you can set a maximum disk write rate (for example, 10 MB per second) for a VM user.", "properties": { "quota:disk_read_bytes_sec": { "title": "Quota: Disk read bytes / sec", "description": "Sets disk I/O quota for disk read bytes / sec.", "type": "integer" }, "quota:disk_read_iops_sec": { "title": "Quota: Disk read IOPS / sec", "description": "Sets disk I/O quota for disk read IOPS / sec.", "type": "integer" }, "quota:disk_write_bytes_sec": { "title": "Quota: Disk Write Bytes / sec", "description": "Sets disk I/O quota for disk write bytes / sec.", "type": "integer" }, "quota:disk_write_iops_sec": { "title": "Quota: Disk Write IOPS / sec", "description": "Sets disk I/O quota for disk write IOPS / sec.", "type": "integer" }, "quota:disk_total_bytes_sec": { "title": "Quota: Disk Total Bytes / sec", "description": "Sets disk I/O quota for total disk bytes / sec.", "type": "integer" }, "quota:disk_total_iops_sec": { "title": "Quota: Disk Total IOPS / sec", "description": "Sets disk I/O quota for disk total IOPS / sec.", "type": "integer" } } }, { "name": "Virtual Interface QoS", "description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). VIFs are typically owned and managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state. 
When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.", "properties": { "quota:vif_inbound_average": { "title": "Quota: VIF Inbound Average", "description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", "type": "integer" }, "quota:vif_inbound_burst": { "title": "Quota: VIF Inbound Burst", "description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", "type": "integer" }, "quota:vif_inbound_peak": { "title": "Quota: VIF Inbound Peak", "description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. Specifies maximum rate at which an interface can receive data.", "type": "integer" }, "quota:vif_outbound_average": { "title": "Quota: VIF Outbound Average", "description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", "type": "integer" }, "quota:vif_outbound_burst": { "title": "Quota: VIF Outbound Burst", "description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", "type": "integer" }, "quota:vif_outbound_peak": { "title": "Quota: VIF Outbound Peak", "description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. Specifies maximum rate at which an interface can send data.", "type": "integer" } } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-randomgen.json0000664000175000017500000000203400000000000021737 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::RandomNumberGenerator", "display_name": "Random Number Generator", "description": "If a random-number generator device has been added to the instance through its image properties, the device can be enabled and configured.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "hw_rng:allowed": { "title": "Random Number Generator Allowed", "description": "", "type": "boolean" }, "hw_rng:rate_bytes": { "title": "Random number generator limits.", "description": "Allowed amount of bytes that the guest can read from the host's entropy per period.", "type": "integer" }, "hw_rng:rate_period": { "title": "Random number generator read period.", "description": "Duration of the read period in milliseconds.", "type": "integer" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-vcputopology.json0000664000175000017500000000372400000000000022546 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::VirtCPUTopology", "display_name": "Virtual CPU Topology", "description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. 
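As an illustration (an editorial sketch, not part of the metadef file itself): the OS::Compute::Quota properties above are consumed as flavor extra specs. A minimal sketch of applying a CPU weight and an inbound VIF rate, assuming a recent openstacksdk that exposes create_flavor_extra_specs, a clouds.yaml entry named "devstack", and a flavor named "m1.small" (the names and values are hypothetical):

    import openstack

    conn = openstack.connect(cloud="devstack")  # cloud name is an assumption
    flavor = conn.compute.find_flavor("m1.small")
    # Extra spec values are strings; keys use the "quota:" prefix shown above.
    conn.compute.create_flavor_extra_specs(flavor, {
        "quota:cpu_shares": "2048",           # twice the weight of a 1024 VM
        "quota:vif_inbound_average": "1024",  # about 1 MB/s average inbound
    })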
See also: https://opendev.org/openstack/nova-specs/src/branch/master/specs/juno/implemented/virt-driver-vcpu-topology.rst", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "cpu_sockets": { "title": "vCPU Sockets", "description": "Preferred number of sockets to expose to the guest.", "type": "integer" }, "cpu_cores": { "title": "vCPU Cores", "description": "Preferred number of cores to expose to the guest.", "type": "integer" }, "cpu_threads": { "title": " vCPU Threads", "description": "Preferred number of threads to expose to the guest.", "type": "integer" }, "cpu_max_sockets": { "title": "Max vCPU Sockets", "description": "Maximum number of sockets to expose to the guest.", "type": "integer" }, "cpu_max_cores": { "title": "Max vCPU Cores", "description": "Maximum number of cores to expose to the guest.", "type": "integer" }, "cpu_max_threads": { "title": "Max vCPU Threads", "description": "Maximum number of threads to expose to the guest.", "type": "integer" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-vmware-flavor.json0000664000175000017500000000335500000000000022564 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::VMwareFlavor", "display_name": "VMware Driver Options for Flavors", "description": "VMware Driver Options for Flavors may be used to customize and manage Nova Flavors. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. See: http://docs.openstack.org/admin-guide/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "vmware:hw_version": { "title": "VMware Hardware Version", "description": "Specifies the hardware version VMware uses to create images. If the hardware version needs to be compatible with a cluster version, for backward compatibility or other circumstances, the vmware:hw_version key specifies a virtual machine hardware version. In the event that a cluster has mixed host version types, the key will enable the vCenter to place the cluster on the correct host.", "type": "string", "enum": [ "vmx-13", "vmx-11", "vmx-10", "vmx-09", "vmx-08", "vmx-07", "vmx-04", "vmx-03" ] }, "vmware:storage_policy": { "title": "VMware Storage Policy", "description": "Specifies the storage policy to be applied for newly created instance. If not provided, the default storage policy specified in config file will be used. If Storage Policy Based Management (SPBM) is not enabled in config file, this value won't be used.", "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-vmware-quota-flavor.json0000664000175000017500000000315400000000000023710 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::VMwareQuotaFlavor", "display_name": "VMware Quota for Flavors", "description": "The VMware compute driver allows various compute quotas to be specified on flavors. When specified, the VMWare driver will ensure that the quota is enforced. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. 
For a list of hypervisors, see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix. For flavor customization, see: http://docs.openstack.org/admin-guide/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "quota:cpu_limit": { "title": "Quota: CPU Limit", "description": "Specifies the upper limit for CPU allocation in MHz. This parameter ensures that a machine never uses more than the defined amount of CPU time. It can be used to enforce a limit on the machine's CPU performance. The value should be a numerical value in MHz. If zero is supplied then the cpu_limit is unlimited.", "type": "integer", "minimum": 0 }, "quota:cpu_reservation": { "title": "Quota: CPU Reservation Limit", "description": "Specifies the guaranteed minimum CPU reservation in MHz. This means that if needed, the machine will definitely get allocated the reserved amount of CPU cycles. The value should be a numerical value in MHz.", "type": "integer", "minimum": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-vmware.json0000664000175000017500000002037100000000000021272 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::VMware", "display_name": "VMware Driver Options", "description": "The VMware compute driver options. \n\nThese are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "img_linked_clone":{ "title": "Linked Clone", "description": "By default, the VMware compute driver creates linked clones when possible (though this can be turned off by the operator). You can use this image property on a per-image basis to control whether virtual machines booted from the image are treated as full clones (value: false) or linked clones (value: true). Please refer to VMware documentation for information about full vs. linked clones.", "type": "boolean" }, "vmware_adaptertype": { "title": "Disk Adapter Type", "description": "The virtual SCSI or IDE controller used by the hypervisor.", "type": "string", "enum": [ "lsiLogic", "lsiLogicsas", "paraVirtual", "busLogic", "ide" ], "default" : "lsiLogic" }, "vmware_disktype": { "title": "Disk Provisioning Type", "description": "When performing operations such as creating a virtual disk, cloning, or migrating, the disk provisioning type may be specified. Please refer to VMware documentation for more.", "type": "string", "enum": [ "streamOptimized", "sparse", "preallocated" ], "default" : "preallocated" }, "vmware_ostype": { "title": "OS Type", "description": "A VMware GuestID which describes the operating system installed in the image. This value is passed to the hypervisor when creating a virtual machine. If not specified, the key defaults to otherGuest. 
See thinkvirt.com.", "type": "string", "enum": [ "asianux3_64Guest", "asianux3Guest", "asianux4_64Guest", "asianux4Guest", "asianux5_64Guest", "asianux7_64Guest", "centos64Guest", "centosGuest", "centos6Guest", "centos6_64Guest", "centos7_64Guest", "coreos64Guest", "darwin10_64Guest", "darwin10Guest", "darwin11_64Guest", "darwin11Guest", "darwin12_64Guest", "darwin13_64Guest", "darwin14_64Guest", "darwin15_64Guest", "darwin16_64Guest", "darwin64Guest", "darwinGuest", "debian4_64Guest", "debian4Guest", "debian5_64Guest", "debian5Guest", "debian6_64Guest", "debian6Guest", "debian7_64Guest", "debian7Guest", "debian8_64Guest", "debian8Guest", "debian9_64Guest", "debian9Guest", "debian10_64Guest", "debian10Guest", "dosGuest", "eComStation2Guest", "eComStationGuest", "fedora64Guest", "fedoraGuest", "freebsd64Guest", "freebsdGuest", "genericLinuxGuest", "mandrakeGuest", "mandriva64Guest", "mandrivaGuest", "netware4Guest", "netware5Guest", "netware6Guest", "nld9Guest", "oesGuest", "openServer5Guest", "openServer6Guest", "opensuse64Guest", "opensuseGuest", "oracleLinux64Guest", "oracleLinuxGuest", "oracleLinux6Guest", "oracleLinux6_64Guest", "oracleLinux7_64Guest", "os2Guest", "other24xLinux64Guest", "other24xLinuxGuest", "other26xLinux64Guest", "other26xLinuxGuest", "other3xLinux64Guest", "other3xLinuxGuest", "otherGuest", "otherGuest64", "otherLinux64Guest", "otherLinuxGuest", "redhatGuest", "rhel2Guest", "rhel3_64Guest", "rhel3Guest", "rhel4_64Guest", "rhel4Guest", "rhel5_64Guest", "rhel5Guest", "rhel6_64Guest", "rhel6Guest", "rhel7_64Guest", "rhel7Guest", "sjdsGuest", "sles10_64Guest", "sles10Guest", "sles11_64Guest", "sles11Guest", "sles12_64Guest", "sles12Guest", "sles64Guest", "slesGuest", "solaris10_64Guest", "solaris10Guest", "solaris11_64Guest", "solaris6Guest", "solaris7Guest", "solaris8Guest", "solaris9Guest", "turboLinux64Guest", "turboLinuxGuest", "ubuntu64Guest", "ubuntuGuest", "unixWare7Guest", "vmkernel5Guest", "vmkernel6Guest", "vmkernel65Guest", "vmkernelGuest", "vmwarePhoton64Guest", "win2000AdvServGuest", "win2000ProGuest", "win2000ServGuest", "win31Guest", "win95Guest", "win98Guest", "windows7_64Guest", "windows7Guest", "windows7Server64Guest", "windows8_64Guest", "windows8Guest", "windows8Server64Guest", "windows9_64Guest", "windows9Guest", "windows9Server64Guest", "windowsHyperVGuest", "winLonghorn64Guest", "winLonghornGuest", "winMeGuest", "winNetBusinessGuest", "winNetDatacenter64Guest", "winNetDatacenterGuest", "winNetEnterprise64Guest", "winNetEnterpriseGuest", "winNetStandard64Guest", "winNetStandardGuest", "winNetWebGuest", "winNTGuest", "winVista64Guest", "winVistaGuest", "winXPHomeGuest", "winXPPro64Guest", "winXPProGuest" ], "default": "otherGuest" }, "hw_vif_model": { "title": "Virtual Network Interface", "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor. VMware driver supported options: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, and VirtualVmxnet.", "type": "string", "enum": [ "e1000", "e1000e", "VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualVmxnet3" ], "default" : "e1000" }, "hw_vif_multiqueue_enabled": { "title": "Multiqueue Enabled", "description": "If true, this enables the virtio-net multiqueue feature. In this case, the driver sets the number of queues equal to the number of guest vCPUs. 
This makes the network performance scale across a number of vCPUs.", "type": "string", "enum": ["true", "false"] } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-vtpm-hw.json0000664000175000017500000000143700000000000021375 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::TPM", "display_name": "TPM Options", "description": "Configuration options for TPM", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor", "prefix": "hw:" }, { "name": "OS::Glance::Image", "prefix": "hw_" } ], "properties": { "tpm_model": { "title": "TPM model", "description": "TPM model to use. Option CRB is only valid for TPM version 2.0. Defaults to TIS.", "operators": [""], "type": "string", "enum": [ "TIS", "CRB" ], "default": "TIS" } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-vtpm.json0000664000175000017500000000205700000000000020760 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::VTPM", "display_name": "Emulated Virtual TPM", "description": "Configuration options for Emulated Virtual TPM", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor", "prefix": "traits:" }, { "name": "OS::Glance::Image", "prefix": "traits:" } ], "properties": { "COMPUTE_SECURITY_TPM_1_2": { "title": "Virtual TPM Version 1.2 Support", "description": "Enables virtual TPM version 1.2.", "type": "string", "enum": [ "required" ], "default": "required" }, "COMPUTE_SECURITY_TPM_2_0": { "title": "Virtual TPM Version 2.0 Support", "description": "Enables virtual TPM version 2.0.", "type": "string", "enum": [ "required" ], "default": "required" } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-watchdog.json0000664000175000017500000000262600000000000021574 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::Watchdog", "display_name": "Watchdog Behavior", "description": "Compute drivers may enable watchdog behavior over instances. See: http://docs.openstack.org/admin-guide/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "watchdog_action": { "title": "Watchdog Action", "description": "For the libvirt driver, you can enable and set the behavior of a virtual hardware watchdog device for each flavor. Watchdog devices keep an eye on the guest server, and carry out the configured action, if the server hangs. The watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). If hw_watchdog_action is not specified, the watchdog is disabled. 
Watchdog behavior set using a specific image's properties will override behavior set using flavors.", "type": "string", "enum": [ "disabled", "reset", "poweroff", "pause", "none" ] } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/compute-xenapi.json0000664000175000017500000000174600000000000021262 0ustar00zuulzuul00000000000000{ "namespace": "OS::Compute::XenAPI", "display_name": "XenAPI Driver Options", "description": "The XenAPI compute driver options. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "auto_disk_config": { "title": "Disk Adapter Type", "description": "If true, the root partition on the disk is automatically resized before the instance boots. This value is only taken into account by the Compute service when using a Xen-based hypervisor with the XenAPI driver. The Compute service will only attempt to resize if there is a single partition on the image, and only if the partition is in ext3 or ext4 format.", "type": "boolean" } }, "objects": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/glance-common-image-props.json0000664000175000017500000000657700000000000023273 0ustar00zuulzuul00000000000000{ "display_name": "Common Image Properties", "namespace": "OS::Glance::CommonImageProperties", "description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.", "protected": true, "resource_type_associations" : [ ], "properties": { "kernel_id": { "title": "Kernel ID", "type": "string", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." }, "ramdisk_id": { "title": "Ramdisk ID", "type": "string", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." }, "instance_uuid": { "title": "Instance ID", "type": "string", "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)" }, "architecture": { "title": "CPU Architecture", "description": "The CPU architecture that must be supported by the hypervisor. For example, x86_64, arm, or ppc64. Run uname -m to get the architecture of a machine. We strongly recommend using the architecture data vocabulary defined by the libosinfo project for this purpose.", "type": "string" }, "os_distro": { "title": "OS Distro", "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", "type": "string" }, "hw_cpu_mode": { "title": "CPU Mode", "description": "This specifies the CPU Mode for the guest vm", "type": "string" }, "os_version": { "title": "OS Version", "description": "Operating system version as specified by the distributor. 
(for example, '11.10')", "type": "string" }, "description": { "title": "Image description", "description": "A human-readable string describing this image.", "type": "string" }, "cinder_encryption_key_id": { "title": "Cinder Encryption Key ID", "description": "Identifier in the OpenStack Key Management Service for the encryption key for the Block Storage Service to use when mounting a volume created from this image", "type": "string" }, "cinder_encryption_key_deletion_policy": { "title": "Cinder Encryption Key Deletion Policy", "description": "States the condition under which the Image Service will delete the object associated with the 'cinder_encryption_key_id' image property. If this property is missing, the Image Service will take no action", "type": "string", "enum": [ "on_image_deletion", "do_not_delete" ] } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/image-signature-verification.json0000664000175000017500000000322700000000000024061 0ustar00zuulzuul00000000000000{ "namespace": "OS::Glance::Signatures", "display_name": "Image Signature Verification", "description": "Image signature verification allows the user to verify that an image has not been modified prior to booting the image.", "visibility": "public", "protected": false, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "img_signature": { "title": "Image Signature", "description": "The signature of the image data encoded in base64 format.", "type": "string" }, "img_signature_certificate_uuid": { "title": "Image Signature Certificate UUID", "description": "The UUID used to retrieve the certificate from the key manager.", "type": "string" }, "img_signature_hash_method": { "title": "Image Signature Hash Method", "description": "The hash method used in creating the signature.", "type": "string", "enum": [ "SHA-224", "SHA-256", "SHA-384", "SHA-512" ] }, "img_signature_key_type": { "title": "Image Signature Key Type", "description": "The key type used in creating the signature.", "type": "string", "enum": [ "RSA-PSS", "DSA", "ECC_SECT571K1", "ECC_SECT409K1", "ECC_SECT571R1", "ECC_SECT409R1", "ECC_SECP521R1", "ECC_SECP384R1" ] } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/operating-system.json0000664000175000017500000000244200000000000021630 0ustar00zuulzuul00000000000000{ "display_name": "Common Operating System Properties", "namespace": "OS::OperatingSystem", "description": "Details of the operating system contained within this image as well as common operating system properties that can be set on a VM instance created from this image.", "protected": true, "resource_type_associations" : [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" } ], "properties": { "os_distro": { "title": "OS Distro", "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", "type": "string" }, "os_version": { "title": "OS Version", "description": "Operating system version as specified by the distributor. 
(for example, '11.10')", "type": "string" }, "os_admin_user": { "title": "OS Admin User", "description": "The name of the user with admin privileges.", "type": "string" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/software-databases.json0000664000175000017500000004445300000000000022103 0ustar00zuulzuul00000000000000{ "namespace": "OS::Software::DBMS", "display_name": "Database Software", "description": "A database is an organized collection of data. The data is typically organized to model aspects of reality in a way that supports processes requiring information. Database management systems are computer software applications that interact with the user, other applications, and the database itself to capture and analyze data. (http://en.wikipedia.org/wiki/Database)", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Server", "properties_target": "metadata" }, { "name": "OS::Trove::Instance" } ], "objects": [ { "name": "MySQL", "description": "MySQL is a relational database management system (RDBMS). The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, now owned by Oracle Corporation. MySQL is a popular choice of database for use in web applications, and is a central component of the widely used LAMP open source web application software stack (and other 'AMP' stacks). (http://en.wikipedia.org/wiki/MySQL)", "properties": { "sw_database_mysql_version": { "title": "Version", "description": "The specific version of MySQL.", "type": "string" }, "sw_database_mysql_listen_port": { "title": "Listen Port", "description": "The configured TCP/IP port on which MySQL listens for incoming connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 3306 }, "sw_database_mysql_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "root" } } }, { "name": "PostgreSQL", "description": "PostgreSQL, often simply 'Postgres', is an object-relational database management system (ORDBMS) with an emphasis on extensibility and standards-compliance. PostgreSQL is cross-platform and runs on many operating systems. (http://en.wikipedia.org/wiki/PostgreSQL)", "properties": { "sw_database_postgresql_version": { "title": "Version", "description": "The specific version of PostgreSQL.", "type": "string" }, "sw_database_postgresql_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which PostgreSQL is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5432 }, "sw_database_postgresql_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "postgres" } } }, { "name": "SQL Server", "description": "Microsoft SQL Server is a relational database management system developed by Microsoft. 
There are at least a dozen different editions of Microsoft SQL Server aimed at different audiences and for workloads ranging from small single-machine applications to large Internet-facing applications with many concurrent users. Its primary query languages are T-SQL and ANSI SQL. (http://en.wikipedia.org/wiki/Microsoft_SQL_Server)", "properties": { "sw_database_sqlserver_version": { "title": "Version", "description": "The specific version of Microsoft SQL Server.", "type": "string" }, "sw_database_sqlserver_edition": { "title": "Edition", "description": "SQL Server is available in multiple editions, with different feature sets and targeting different users.", "type": "string", "default": "Express", "enum": [ "Datacenter", "Enterprise", "Standard", "Web", "Business Intelligence", "Workgroup", "Express", "Compact (SQL CE)", "Developer", "Embedded (SSEE)", "Fast Track", "LocalDB", "Parallel Data Warehouse (PDW)", "Datawarehouse Appliance Edition" ] }, "sw_database_sqlserver_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which SQL Server is to listen for connections from client applications. The default SQL Server port is 1433, and client ports are assigned a random value between 1024 and 5000.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 1433 }, "sw_database_sqlserver_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "sa" } } }, { "name": "Oracle", "description": "Oracle Database (commonly referred to as Oracle RDBMS or simply as Oracle) is an object-relational database management system produced and marketed by Oracle Corporation. (http://en.wikipedia.org/wiki/Oracle_Database)", "properties": { "sw_database_oracle_version": { "title": "Version", "description": "The specific version of Oracle.", "type": "string" }, "sw_database_oracle_edition": { "title": "Edition", "description": "Over and above the different versions of the Oracle database management software developed over time, Oracle Corporation subdivides its product into varying editions.", "type": "string", "default": "Express", "enum": [ "Enterprise", "Standard", "Standard Edition One", "Express (XE)", "Workgroup", "Lite" ] }, "sw_database_oracle_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Oracle is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 1521 } } }, { "name": "DB2", "description": "IBM DB2 is a family of database server products developed by IBM. These products all support the relational model, but in recent years some products have been extended to support object-relational features and non-relational structures, in particular XML. 
(http://en.wikipedia.org/wiki/IBM_DB2)", "properties": { "sw_database_db2_version": { "title": "Version", "description": "The specific version of DB2.", "type": "string" }, "sw_database_db2_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which DB2 is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 50000 }, "sw_database_db2_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string" } } }, { "name": "MongoDB", "description": "MongoDB is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB uses JSON-like documents with dynamic schemas (MongoDB calls the format BSON), making the integration of data in certain types of applications easier and faster. Released under a combination of the GNU Affero General Public License and the Apache License, MongoDB is free and open-source software. (http://en.wikipedia.org/wiki/MongoDB)", "properties": { "sw_database_mongodb_version": { "title": "Version", "description": "The specific version of MongoDB.", "type": "string" }, "sw_database_mongodb_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which MongoDB is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 27017 }, "sw_database_mongodb_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string" } } }, { "name": "Couchbase Server", "description": "Couchbase Server, originally known as Membase, is an open source, distributed (shared-nothing architecture) NoSQL document-oriented database that is optimized for interactive applications. These applications must serve many concurrent users by creating, storing, retrieving, aggregating, manipulating and presenting data. In support of these kinds of application needs, Couchbase is designed to provide easy-to-scale key-value or document access with low latency and high sustained throughput. (http://en.wikipedia.org/wiki/Couchbase_Server)", "properties": { "sw_database_couchbaseserver_version": { "title": "Version", "description": "The specific version of Couchbase Server.", "type": "string" }, "sw_database_couchbaseserver_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Couchbase is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 11211 }, "sw_database_couchbaseserver_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "admin" } } }, { "name": "Redis", "description": "Redis is a data structure server (NoSQL). It is open-source, networked, in-memory, and stores keys with optional durability. The development of Redis has been sponsored by Pivotal Software since May 2013; before that, it was sponsored by VMware. The name Redis means REmote DIctionary Server. 
(http://en.wikipedia.org/wiki/Redis)", "properties": { "sw_database_redis_version": { "title": "Version", "description": "The specific version of Redis.", "type": "string" }, "sw_database_redis_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Redis is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 6379 }, "sw_database_redis_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "admin" } } }, { "name": "CouchDB", "description": "Apache CouchDB, commonly referred to as CouchDB, is an open source NoSQL database. It is a NoSQL database that uses JSON to store data, JavaScript as its query language using MapReduce, and HTTP for an API. One of its distinguishing features is multi-master replication. CouchDB was first released in 2005 and later became an Apache project in 2008. (http://en.wikipedia.org/wiki/CouchDB)", "properties": { "sw_database_couchdb_version": { "title": "Version", "description": "The specific version of CouchDB.", "type": "string" }, "sw_database_couchdb_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which CouchDB is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5984 }, "sw_database_couchdb_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string" } } }, { "name": "Apache Cassandra", "description": "Apache Cassandra is an open source distributed NoSQL database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. (http://en.wikipedia.org/wiki/Apache_Cassandra)", "properties": { "sw_database_cassandra_version": { "title": "Version", "description": "The specific version of Apache Cassandra.", "type": "string" }, "sw_database_cassandra_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Cassandra is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 9160 }, "sw_database_cassandra_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "cassandra" } } }, { "name": "HBase", "description": "HBase is an open source, non-relational (NoSQL), distributed database modeled after Google's BigTable and written in Java. It is developed as part of Apache Software Foundation's Apache Hadoop project and runs on top of HDFS (Hadoop Distributed Filesystem), providing BigTable-like capabilities for Hadoop. (http://en.wikipedia.org/wiki/Apache_HBase)", "properties": { "sw_database_hbase_version": { "title": "Version", "description": "The specific version of HBase.", "type": "string" } } }, { "name": "Hazlecast", "description": "In computing, Hazelcast is an in-memory open source software data grid based on Java. By having multiple nodes form a cluster, data is evenly distributed among the nodes. This allows for horizontal scaling both in terms of available storage space and processing power. 
Backups are also distributed in a similar fashion to other nodes, based on configuration, thereby protecting against single node failure. (http://en.wikipedia.org/wiki/Hazelcast)", "properties": { "sw_database_hazlecast_version": { "title": "Version", "description": "The specific version of Hazelcast.", "type": "string" }, "sw_database_hazlecast_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Hazelcast is to listen for connections between members.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5701 } } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/software-runtimes.json0000664000175000017500000001220400000000000022011 0ustar00zuulzuul00000000000000{ "namespace": "OS::Software::Runtimes", "display_name": "Runtime Environment", "description": "Software is written in a specific programming language and the language must execute within a runtime environment. The runtime environment provides an abstraction to utilizing a computer's processor, memory (RAM), and other system resources.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Server", "properties_target": "metadata" } ], "objects": [ { "name": "PHP", "description": "PHP is a server-side scripting language designed for web development but also used as a general-purpose programming language. PHP code can be simply mixed with HTML code, or it can be used in combination with various templating engines and web frameworks. PHP code is usually processed by a PHP interpreter, which is usually implemented as a web server's native module or a Common Gateway Interface (CGI) executable. After the PHP code is interpreted and executed, the web server sends resulting output to its client, usually in form of a part of the generated web page – for example, PHP code can generate a web page's HTML code, an image, or some other data. PHP has also evolved to include a command-line interface (CLI) capability and can be used in standalone graphical applications. (http://en.wikipedia.org/wiki/PHP)", "properties": { "sw_runtime_php_version": { "title": "Version", "description": "The specific version of PHP.", "type": "string" } } }, { "name": "Python", "description": "Python is a widely used general-purpose, high-level programming language. Its design philosophy emphasizes code readability, and its syntax allows programmers to express concepts in fewer lines of code than would be possible in languages such as C++ or Java. The language provides constructs intended to enable clear programs on both a small and large scale. Python supports multiple programming paradigms, including object-oriented, imperative and functional programming or procedural styles. It features a dynamic type system and automatic memory management and has a large and comprehensive standard library. (http://en.wikipedia.org/wiki/Python_(programming_language))", "properties": { "sw_runtime_python_version": { "title": "Version", "description": "The specific version of Python.", "type": "string" } } }, { "name": "Java", "description": "Java is a general-purpose computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible. 
It is intended to let application developers write once, run anywhere (WORA), meaning that code that runs on one platform does not need to be recompiled to run on another. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of computer architecture. (http://en.wikipedia.org/wiki/Java_(programming_language))", "properties": { "sw_runtime_java_version": { "title": "Version", "description": "The specific version of Java.", "type": "string" } } }, { "name": "Ruby", "description": "Ruby is a dynamic, reflective, object-oriented, general-purpose programming language. It was designed and developed in the mid-1990s by Yukihiro Matsumoto in Japan. According to its authors, Ruby was influenced by Perl, Smalltalk, Eiffel, Ada, and Lisp. It supports multiple programming paradigms, including functional, object-oriented, and imperative. It also has a dynamic type system and automatic memory management. (http://en.wikipedia.org/wiki/Ruby_(programming_language))", "properties": { "sw_runtime_ruby_version": { "title": "Version", "description": "The specific version of Ruby.", "type": "string" } } }, { "name": "Perl", "description": "Perl is a family of high-level, general-purpose, interpreted, dynamic programming languages. The languages in this family include Perl 5 and Perl 6. Though Perl is not officially an acronym, there are various backronyms in use, the most well-known being Practical Extraction and Reporting Language. (http://en.wikipedia.org/wiki/Perl)", "properties": { "sw_runtime_perl_version": { "title": "Version", "description": "The specific version of Perl.", "type": "string" } } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/software-webservers.json0000664000175000017500000001321400000000000022334 0ustar00zuulzuul00000000000000{ "namespace": "OS::Software::WebServers", "display_name": "Web Servers", "description": "A web server is a computer system that processes requests via HTTP, the basic network protocol used to distribute information on the World Wide Web. The most common use of web servers is to host websites, but there are other uses such as gaming, data storage, running enterprise applications, handling email, FTP, or other web uses. (http://en.wikipedia.org/wiki/Web_server)", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Server", "properties_target": "metadata" } ], "objects": [ { "name": "Apache HTTP Server", "description": "The Apache HTTP Server, colloquially called Apache, is a Web server application notable for playing a key role in the initial growth of the World Wide Web. Apache is developed and maintained by an open community of developers under the auspices of the Apache Software Foundation. Most commonly used on a Unix-like system, the software is available for a wide variety of operating systems, including Unix, FreeBSD, Linux, Solaris, Novell NetWare, OS X, Microsoft Windows, OS/2, TPF, OpenVMS and eComStation. Released under the Apache License, Apache is open-source software. 
(http://en.wikipedia.org/wiki/Apache_HTTP_Server)", "properties": { "sw_webserver_apache_version": { "title": "Version", "description": "The specific version of Apache.", "type": "string" }, "sw_webserver_apache_http_port": { "title": "HTTP Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 80 }, "sw_webserver_apache_https_port": { "title": "HTTPS Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 443 } } }, { "name": "Nginx", "description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows. (http://en.wikipedia.org/wiki/Nginx)", "properties": { "sw_webserver_nginx_version": { "title": "Version", "description": "The specific version of Nginx.", "type": "string" }, "sw_webserver_nginx_http_port": { "title": "HTTP Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 80 }, "sw_webserver_nginx_https_port": { "title": "HTTPS Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 443 } } }, { "name": "IIS", "description": "Internet Information Services (IIS, formerly Internet Information Server) is an extensible web server created by Microsoft. IIS supports HTTP, HTTPS, FTP, FTPS, SMTP and NNTP. IIS is not turned on by default when Windows is installed. The IIS Manager is accessed through the Microsoft Management Console or Administrative Tools in the Control Panel. (http://en.wikipedia.org/wiki/Internet_Information_Services)", "properties": { "sw_webserver_iis_version": { "title": "Version", "description": "The specific version of IIS.", "type": "string" }, "sw_webserver_iis_http_port": { "title": "HTTP Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 80 }, "sw_webserver_iis_https_port": { "title": "HTTPS Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 443 } } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/metadefs/storage-volume-type.json0000664000175000017500000000211400000000000022242 0ustar00zuulzuul00000000000000{ "namespace": "OS::Cinder::Volumetype", "display_name": "Cinder Volume Type", "description": "The Cinder volume type configuration option. 
Volume type assignment provides a mechanism not only to provide scheduling to a specific storage back-end, but also can be used to specify specific information for a back-end storage device to act upon.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "cinder_" } ], "properties": { "img_volume_type": { "title": "Image Volume Type", "description": "Specifies the volume type that should be applied during new volume creation with a image. This value is passed to Cinder when creating a new volume. Priority of volume type related parameters are 1.volume_type(via API or CLI), 2.cinder_img_volume_type, 3.default_volume_type(via cinder.conf). If not specified, volume_type or default_volume_type will be referred based on their priority.", "type": "string" } } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8383007 glance-29.0.0/etc/oslo-config-generator/0000775000175000017500000000000000000000000020034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/oslo-config-generator/glance-api.conf0000664000175000017500000000101700000000000022702 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 80 output_file = etc/glance-api.conf.sample namespace = castellan.config namespace = glance.api namespace = glance.store namespace = os_brick namespace = glance.multi_store namespace = oslo.concurrency namespace = oslo.messaging namespace = oslo.db namespace = oslo.policy namespace = keystonemiddleware.auth_token namespace = oslo.limit namespace = oslo.log namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.reports ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/oslo-config-generator/glance-cache.conf0000664000175000017500000000027100000000000023175 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 80 output_file = etc/glance-cache.conf.sample namespace = glance.cache namespace = glance.store namespace = os_brick namespace = oslo.log namespace = oslo.policy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/oslo-config-generator/glance-image-import.conf0000664000175000017500000000013600000000000024524 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 80 output_file = etc/glance-image-import.conf.sample namespace = glance././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/oslo-config-generator/glance-manage.conf0000664000175000017500000000021100000000000023354 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 80 output_file = etc/glance-manage.conf.sample namespace = glance.manage namespace = oslo.db namespace = oslo.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/oslo-config-generator/glance-scrubber.conf0000664000175000017500000000036000000000000023740 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 80 output_file = etc/glance-scrubber.conf.sample namespace = glance.scrubber namespace = glance.store namespace = os_brick namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.policy ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/ovf-metadata.json.sample0000664000175000017500000000017600000000000020360 0ustar00zuulzuul00000000000000{ "cim_pasd": [ "ProcessorArchitecture", "InstructionSet", "InstructionSetExtensionName" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/property-protections-policies.conf.sample0000664000175000017500000000257200000000000024026 0ustar00zuulzuul00000000000000# property-protections-policies.conf.sample # # This file is an example config file for when # property_protection_rule_format=policies is enabled. # # Specify regular expression for which properties will be protected in [] # For each section, specify CRUD permissions. # # The permissions specified may refer to policies defined in a policy file. # The name of this file may be specified as the value of the policy_file # option in the [oslo_policy] section of the glance-api.conf file. The # format of this file may be JSON or YAML. # # The property rules will be applied in the order specified. Once # a match is found the remaining property rules will not be applied. # # WARNING: # * If the reg ex specified below does not compile, then # the glance-api service fails to start. (Guide for reg ex python compiler # used: # http://docs.python.org/2/library/re.html#regular-expression-syntax) # * If an operation(create, read, update, delete) is not specified or misspelt # then the glance-api service fails to start. # So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! # # NOTE: Only one policy can be specified per action. If multiple policies are # specified, then the glance-api service fails to start. [^x_.*] create = default read = default update = default delete = default [.*] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/property-protections-roles.conf.sample0000664000175000017500000000211500000000000023334 0ustar00zuulzuul00000000000000# property-protections-roles.conf.sample # # This file is an example config file for when # property_protection_rule_format=roles is enabled. # # Specify regular expression for which properties will be protected in [] # For each section, specify CRUD permissions. # The property rules will be applied in the order specified. Once # a match is found the remaining property rules will not be applied. # # WARNING: # * If the reg ex specified below does not compile, then # glance-api service will not start. (Guide for reg ex python compiler used: # http://docs.python.org/2/library/re.html#regular-expression-syntax) # * If an operation(create, read, update, delete) is not specified or misspelt # then the glance-api service will not start. # So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! # # NOTE: Multiple roles can be specified for a given operation. These roles must # be comma separated. 
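#
# For illustration only (editor's addition, not part of the shipped
# sample): a rule that lets members read, but not modify, properties
# beginning with 'x_billing_' could be written as below. The property
# prefix and role names here are invented for the example.
#
# [^x_billing_.*]
# create = admin
# read = admin,member
# update = admin
# delete = admin
#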
[^x_.*] create = admin,member,_member_ read = admin,member,_member_ update = admin,member,_member_ delete = admin,member,_member_ [.*] create = admin read = admin update = admin delete = admin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/etc/schema-image.json0000664000175000017500000000414100000000000017044 0ustar00zuulzuul00000000000000{ "kernel_id": { "type": ["null", "string"], "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." }, "ramdisk_id": { "type": ["null", "string"], "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." }, "instance_uuid": { "type": "string", "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)" }, "architecture": { "description": "Operating system architecture as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html", "type": "string" }, "os_distro": { "description": "Common name of operating system distribution as specified in https://docs.openstack.org/python-glanceclient/latest/cli/property-keys.html", "type": "string" }, "os_version": { "description": "Operating system version as specified by the distributor.", "type": "string" }, "description": { "description": "A human-readable string describing this image.", "type": "string" }, "cinder_encryption_key_id": { "description": "Identifier in the OpenStack Key Management Service for the encryption key for the Block Storage Service to use when mounting a volume created from this image", "type": "string" }, "cinder_encryption_key_deletion_policy": { "description": "States the condition under which the Image Service will delete the object associated with the 'cinder_encryption_key_id' image property. If this property is missing, the Image Service will take no action", "type": "string", "enum": [ "on_image_deletion", "do_not_delete" ] } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8423011 glance-29.0.0/glance/0000775000175000017500000000000000000000000014307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/__init__.py0000664000175000017500000000000000000000000016406 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8423011 glance-29.0.0/glance/api/0000775000175000017500000000000000000000000015060 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/__init__.py0000664000175000017500000000234700000000000017177 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import paste.urlmap CONF = cfg.CONF def root_app_factory(loader, global_conf, **local_conf): return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of deployment flavor.""" pipeline = local_conf[CONF.paste_deploy.flavor or 'default'] pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/common.py0000664000175000017500000002020100000000000016715 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
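# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not Glance code: how pipeline_factory above
# composes a paste pipeline. The names 'trace_filter' and 'api_app' are
# hypothetical placeholders rather than real Glance entry points.
def _pipeline_factory_sketch():
    def trace_filter(app):
        # A paste filter wraps an app and returns a callable with the
        # same interface.
        def wrapped(environ):
            environ.setdefault('trace', []).append('trace_filter')
            return app(environ)
        return wrapped

    def api_app(environ):
        environ.setdefault('trace', []).append('api_app')
        return 'response'

    # Mirrors pipeline_factory: every name but the last is a filter, the
    # last is the app, and reversing before wrapping means the first
    # listed filter sees the request first.
    filters = [trace_filter]
    app = api_app
    for f in reversed(filters):
        app = f(app)

    environ = {}
    assert app(environ) == 'response'
    assert environ['trace'] == ['trace_filter', 'api_app']
# ---------------------------------------------------------------------------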
import re from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import glance.async_ from glance.common import exception from glance.i18n import _, _LE, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF GLANCE_RESERVED_NS = 'os_glance' _CACHED_THREAD_POOL = {} def size_checked_iter(response, image_meta, expected_size, image_iter, notifier): image_id = image_meta['id'] bytes_written = 0 def notify_image_sent_hook(env): image_send_notification(bytes_written, expected_size, image_meta, response.request, notifier) # Add hook to process after response is fully sent if 'eventlet.posthooks' in response.request.environ: response.request.environ['eventlet.posthooks'].append( (notify_image_sent_hook, (), {})) try: for chunk in image_iter: yield chunk bytes_written += len(chunk) except Exception as err: with excutils.save_and_reraise_exception(): msg = (_LE("An error occurred reading from backend storage for " "image %(image_id)s: %(err)s") % {'image_id': image_id, 'err': err}) LOG.error(msg) if expected_size != bytes_written: msg = (_LE("Backend storage for image %(image_id)s " "disconnected after writing only %(bytes_written)d " "bytes") % {'image_id': image_id, 'bytes_written': bytes_written}) LOG.error(msg) raise exception.GlanceException(_("Corrupt image download for " "image %(image_id)s") % {'image_id': image_id}) def image_send_notification(bytes_written, expected_size, image_meta, request, notifier): """Send an image.send message to the notifier.""" try: context = request.context payload = { 'bytes_sent': bytes_written, 'image_id': image_meta['id'], 'owner_id': image_meta['owner'], 'receiver_tenant_id': context.project_id, 'receiver_user_id': context.user_id, 'destination_ip': request.remote_addr, } if bytes_written != expected_size: notify = notifier.error else: notify = notifier.info notify('image.send', payload) except Exception as err: msg = (_LE("An error occurred during image.send" " notification: %(err)s") % {'err': err}) LOG.error(msg) def get_remaining_quota(context, db_api, image_id=None): """Method called to see if the user is allowed to store an image. Checks if it is allowed based on the given size in glance based on their quota and current usage. :param context: :param db_api: The db_api in use for this configuration :param image_id: The image that will be replaced with this new data size :returns: The number of bytes the user has remaining under their quota. None means infinity """ # NOTE(jbresnah) in the future this value will come from a call to # keystone. 
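# Illustration (editor's annotation, not upstream): user_storage_quota
# accepts a bare byte count or a count with a binary-unit suffix, e.g.
#   '512'  -> 512 bytes
#   '10KB' -> 10 * units.Ki = 10240 bytes
#   '5GB'  -> 5 * units.Gi bytes
#   '0'    -> 0, which the <= 0 check below treats as unlimited
# The suffix is mapped onto oslo.utils constants via
# quota_unit.replace('B', 'i'), so 'KB' resolves to units.Ki, 'MB' to
# units.Mi, and so on.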
users_quota = CONF.user_storage_quota # set quota must have a number optionally followed by B, KB, MB, # GB or TB without any spaces in between pattern = re.compile(r'^(\d+)((K|M|G|T)?B)?$') match = pattern.match(users_quota) if not match: LOG.error(_LE("Invalid value for option user_storage_quota: " "%(users_quota)s"), {'users_quota': users_quota}) raise exception.InvalidOptionValue(option='user_storage_quota', value=users_quota) quota_value, quota_unit = (match.groups())[0:2] # fall back to Bytes if user specified anything other than # permitted values quota_unit = quota_unit or "B" factor = getattr(units, quota_unit.replace('B', 'i'), 1) users_quota = int(quota_value) * factor if users_quota <= 0: return usage = db_api.user_get_storage_usage(context, context.owner, image_id=image_id) return users_quota - usage def check_quota(context, image_size, db_api, image_id=None): """Method called to see if the user is allowed to store an image. Checks if it is allowed based on the given size in glance based on their quota and current usage. :param context: :param image_size: The size of the image we hope to store :param db_api: The db_api in use for this configuration :param image_id: The image that will be replaced with this new data size :returns: """ # NOTE(danms): If keystone quotas are enabled, those take # precedence and this check is a no-op. if CONF.use_keystone_limits: return remaining = get_remaining_quota(context, db_api, image_id=image_id) if remaining is None: return user = getattr(context, 'user_id', '') if image_size is None: # NOTE(jbresnah) When the image size is None it means that it is # not known. In this case the only time we will raise an # exception is when there is no room left at all, thus we know # it will not fit if remaining <= 0: LOG.warning(_LW("User %(user)s attempted to upload an image of" " unknown size that will exceed the quota." " %(remaining)d bytes remaining."), {'user': user, 'remaining': remaining}) raise exception.StorageQuotaFull(image_size=image_size, remaining=remaining) return if image_size > remaining: LOG.warning(_LW("User %(user)s attempted to upload an image of size" " %(size)d that will exceed the quota. %(remaining)d" " bytes remaining."), {'user': user, 'size': image_size, 'remaining': remaining}) raise exception.StorageQuotaFull(image_size=image_size, remaining=remaining) return remaining def memoize(lock_name): def memoizer_wrapper(func): @lockutils.synchronized(lock_name) def memoizer(lock_name): if lock_name not in _CACHED_THREAD_POOL: _CACHED_THREAD_POOL[lock_name] = func() return _CACHED_THREAD_POOL[lock_name] return memoizer(lock_name) return memoizer_wrapper # NOTE(danms): This is the default pool size that will be used for # the get_thread_pool() cache wrapper below. This is a global here # because it needs to be overridden for the pure-wsgi mode in # wsgi_app.py where native threads are used. DEFAULT_POOL_SIZE = 1024 def get_thread_pool(lock_name, size=None): """Initializes thread pool. If thread pool is present in cache, then returns it from cache else create new pool, stores it in cache and return newly created pool. @param lock_name: Name of the lock. @param size: Size of pool. 
@return: ThreadPoolModel """ if size is None: size = DEFAULT_POOL_SIZE @memoize(lock_name) def _get_thread_pool(): threadpool_cls = glance.async_.get_threadpool_model() LOG.debug('Initializing named threadpool %r', lock_name) return threadpool_cls(size) return _get_thread_pool ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8423011 glance-29.0.0/glance/api/middleware/0000775000175000017500000000000000000000000017175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/middleware/__init__.py0000664000175000017500000000000000000000000021274 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/middleware/cache.py0000664000175000017500000003050300000000000020613 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Transparent image file caching middleware, designed to live on Glance API nodes. When images are requested from the API node, this middleware caches the returned image file to local filesystem. When subsequent requests for the same image file are received, the local cached copy of the image file is returned. """ import http.client as http import re from oslo_log import log as logging import webob from glance.api.common import size_checked_iter from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db from glance.i18n import _LE, _LI from glance import image_cache from glance import notifier LOG = logging.getLogger(__name__) PATTERNS = { ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'), ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'), ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'), ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$') } class CacheFilter(wsgi.Middleware): def __init__(self, app): self.cache = image_cache.ImageCache() self.policy = policy.Enforcer() LOG.info(_LI("Initialized image cache middleware")) super(CacheFilter, self).__init__(app) def _verify_metadata(self, image_meta): """ Sanity check the 'deleted' and 'size' metadata values. """ # NOTE: admins can see image metadata in the v1 API, but shouldn't # be able to download the actual image data. 
if image_meta['status'] == 'deleted' and image_meta['deleted']: raise exception.NotFound() if not image_meta['size']: # override image size metadata with the actual cached # file size, see LP Bug #900959 if not isinstance(image_meta, policy.ImageTarget): image_meta['size'] = self.cache.get_image_size( image_meta['id']) else: image_meta.target.size = self.cache.get_image_size( image_meta['id']) @staticmethod def _match_request(request): """Determine the version of the url and extract the image id :returns: tuple of version and image id if the url is a cacheable, otherwise None """ for ((version, method), pattern) in PATTERNS.items(): if request.method != method: continue match = pattern.match(request.path_info) if match is None: continue image_id = match.group(1) # Ensure the image id we got looks like an image id to filter # out a URI like /images/detail. See LP Bug #879136 if image_id != 'detail': return (version, method, image_id) def _enforce(self, req, image): """Authorize an action against our policies""" api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.download_image() def _get_v2_image_metadata(self, request, image_id): """ Retrieves image and for v2 api and creates adapter like object to access image core or custom properties on request. """ db_api = glance.db.get_api() image_repo = glance.db.ImageRepo(request.context, db_api) try: image = image_repo.get(image_id) # Storing image object in request as it is required in # _process_v2_request call. request.environ['api.cache.image'] = image return (image, policy.ImageTarget(image)) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) def process_request(self, request): """ For requests for an image file, we check the local image cache. If present, we return the image file, appending the image metadata in headers. If not present, we pass the request on to the next application in the pipeline. """ match = self._match_request(request) try: (version, method, image_id) = match except TypeError: # Trying to unpack None raises this exception return None self._stash_request_info(request, image_id, method, version) # Partial image download requests shall not be served from cache # Bug: 1664709 # TODO(dharinic): If an image is already cached, add support to serve # only the requested bytes (partial image download) from the cache. 
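# Illustration (editor's annotation, not upstream): a ranged request such
# as 'GET /v2/images/<image_id>/file' with the header 'Range: bytes=0-1023'
# falls through to the backend untouched; only full-image GETs for an
# already-cached image id continue down the cache-hit path below.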
if (request.headers.get('Content-Range') or request.headers.get('Range')): return None if request.method != 'GET' or not self.cache.is_cached(image_id): return None method = getattr(self, '_get_%s_image_metadata' % version) image, metadata = method(request, image_id) # Deactivated images shall not be served from cache if metadata['status'] == 'deactivated': return None # NOTE(abhishekk): This means image is present in cache and before # request is coming to API we are enforcing this check in the # middleware self._enforce(request, image) LOG.debug("Cache hit for image '%s'", image_id) image_iterator = self.get_from_cache(image_id) method = getattr(self, '_process_%s_request' % version) try: return method(request, image_id, image_iterator, metadata) except exception.ImageNotFound: msg = _LE("Image cache contained image file for image '%s', " "however the database did not contain metadata for " "that image!") % image_id LOG.error(msg) self.cache.delete_cached_image(image_id) @staticmethod def _stash_request_info(request, image_id, method, version): """ Preserve the image id, version and request method for later retrieval """ request.environ['api.cache.image_id'] = image_id request.environ['api.cache.method'] = method request.environ['api.cache.version'] = version @staticmethod def _fetch_request_info(request): """ Preserve the cached image id, version for consumption by the process_response method of this middleware """ try: image_id = request.environ['api.cache.image_id'] method = request.environ['api.cache.method'] version = request.environ['api.cache.version'] except KeyError: return None else: return (image_id, method, version) def _process_v2_request(self, request, image_id, image_iterator, image_meta): # We do some contortions to get the image_metadata so # that we can provide it to 'size_checked_iter' which # will generate a notification. # TODO(mclaren): Make notification happen more # naturally once caching is part of the domain model. image = request.environ['api.cache.image'] self._verify_metadata(image_meta) response = webob.Response(request=request) response.app_iter = size_checked_iter(response, image_meta, image_meta['size'], image_iterator, notifier.Notifier()) # NOTE (flwang): Set the content-type, content-md5 and content-length # explicitly to be consistent with the non-cache scenario. # Besides, it's not worth the candle to invoke the "download" method # of ResponseSerializer under image_data. Because method "download" # will reset the app_iter. Then we have to call method # "size_checked_iter" to avoid missing any notification. But after # call "size_checked_iter", we will lose the content-md5 and # content-length got by the method "download" because of this issue: # https://github.com/Pylons/webob/issues/86 response.headers['Content-Type'] = 'application/octet-stream' if image.checksum: response.headers['Content-MD5'] = image.checksum response.headers['Content-Length'] = str(image.size) return response def process_response(self, resp): """ We intercept the response coming back from the main images Resource, removing image file from the cache if necessary """ status_code = self.get_status_code(resp) if not 200 <= status_code < 300: return resp # Note(dharinic): Bug: 1664709: Do not cache partial images. if status_code == http.PARTIAL_CONTENT: return resp try: (image_id, method, version) = self._fetch_request_info( resp.request) except TypeError: return resp if method == 'GET' and status_code == http.NO_CONTENT: # Bugfix:1251055 - Don't cache non-existent image files. 
# NOTE: Both GET for an image without locations and DELETE return # 204 but DELETE should be processed. return resp method_str = '_process_%s_response' % method try: process_response_method = getattr(self, method_str) except AttributeError: LOG.error(_LE('could not find %s'), method_str) # Nothing to do here, move along return resp else: return process_response_method(resp, image_id, version=version) def _process_DELETE_response(self, resp, image_id, version=None): if self.cache.is_cached(image_id): LOG.debug("Removing image %s from cache", image_id) self.cache.delete_cached_image(image_id) return resp def _process_GET_response(self, resp, image_id, version=None): image_checksum = resp.headers.get('Content-MD5') if not image_checksum: # API V1 stores the checksum in a different header: image_checksum = resp.headers.get('x-image-meta-checksum') if not image_checksum: LOG.error(_LE("Checksum header is missing.")) # fetch image_meta on the basis of version image = None if version: method = getattr(self, '_get_%s_image_metadata' % version) image, metadata = method(resp.request, image_id) # NOTE(zhiyan): image_cache return a generator object and set to # response.app_iter, it will be called by eventlet.wsgi later. # So we need enforce policy firstly but do it by application # since eventlet.wsgi could not catch webob.exc.HTTPForbidden and # return 403 error to client then. # FIXME(abhishekk): This policy check here is not necessary as this # will hit only during first image download i.e. while image is not # present in cache. We already enforced same check in API layer and # enforcing same check here again makes no sense. self._enforce(resp.request, image) resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum, resp.app_iter) return resp def get_status_code(self, response): """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): return response.status_int return response.status def get_from_cache(self, image_id): """Called if cache hit""" with self.cache.open_for_read(image_id) as cache_file: chunks = utils.chunkiter(cache_file) for chunk in chunks: yield chunk ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/middleware/cache_manage.py0000664000175000017500000000573200000000000022131 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
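# ---------------------------------------------------------------------------
# Editor's illustrative usage sketch, not Glance code: driving the
# cache-management routes wired up below with python-requests. The endpoint,
# token and image ids are placeholders, and error handling is omitted.
def _cache_manage_sketch():
    import requests

    base = 'http://glance.example.com:9292'            # hypothetical endpoint
    headers = {'X-Auth-Token': 'replace-with-a-token'}  # hypothetical token

    # List the images currently held in the local cache.
    cached = requests.get(base + '/v2/cached_images',
                          headers=headers).json()['cached_images']

    # Evict one image from the cache, then queue another for prefetching.
    requests.delete(base + '/v2/cached_images/<image_id>', headers=headers)
    requests.put(base + '/v2/queued_images/<image_id>', headers=headers)
    return cached
# ---------------------------------------------------------------------------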
""" Image Cache Management API """ from oslo_log import log as logging import routes from glance.api.v2 import cached_images from glance.common import wsgi from glance.i18n import _LI LOG = logging.getLogger(__name__) class CacheManageFilter(wsgi.Middleware): def __init__(self, app): mapper = routes.Mapper() resource = cached_images.create_resource() mapper.connect("/v2/cached_images", controller=resource, action="get_cached_images", conditions=dict(method=["GET"])) mapper.connect("/v2/cached_images/{image_id}", controller=resource, action="delete_cached_image", conditions=dict(method=["DELETE"])) mapper.connect("/v2/cached_images", controller=resource, action="delete_cached_images", conditions=dict(method=["DELETE"])) mapper.connect("/v2/queued_images/{image_id}", controller=resource, action="queue_image", conditions=dict(method=["PUT"])) mapper.connect("/v2/queued_images", controller=resource, action="get_queued_images", conditions=dict(method=["GET"])) mapper.connect("/v2/queued_images/{image_id}", controller=resource, action="delete_queued_image", conditions=dict(method=["DELETE"])) mapper.connect("/v2/queued_images", controller=resource, action="delete_queued_images", conditions=dict(method=["DELETE"])) self._mapper = mapper self._resource = resource LOG.info(_LI("Initialized image cache management middleware")) super(CacheManageFilter, self).__init__(app) def process_request(self, request): # Map request to our resource object if we can handle it match = self._mapper.match(request.path_info, request.environ) if match: request.environ['wsgiorg.routing_args'] = (None, match) return self._resource(request) # Pass off downstream if we don't match the request path else: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/middleware/context.py0000664000175000017500000001316600000000000021242 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import webob.exc from glance.api import policy from glance.common import wsgi import glance.context from glance.i18n import _, _LW context_opts = [ cfg.BoolOpt('allow_anonymous_access', default=False, help=_(""" Allow limited access to unauthenticated users. Assign a boolean to determine API access for unauthenticated users. When set to False, the API cannot be accessed by unauthenticated users. When set to True, unauthenticated users can access the API with read-only privileges. This however only applies when using ContextMiddleware. Possible values: * True * False Related options: * None """)), cfg.IntOpt('max_request_id_length', default=64, min=0, help=_(""" Limit the request ID length. Provide an integer value to limit the length of the request ID to the specified length. The default value is 64. 
Users can change this to any integer value between 0 and 16384, keeping in mind that a larger value may flood the logs. Possible values: * Integer value between 0 and 16384 Related options: * None """)), ] CONF = cfg.CONF CONF.register_opts(context_opts) LOG = logging.getLogger(__name__) class BaseContextMiddleware(wsgi.Middleware): def process_response(self, resp): try: request_id = resp.request.context.request_id except AttributeError: LOG.warning(_LW('Unable to retrieve request id from context')) else: # For python 3 compatibility need to use bytes type prefix = b'req-' if isinstance(request_id, bytes) else 'req-' if not request_id.startswith(prefix): request_id = prefix + request_id resp.headers['x-openstack-request-id'] = request_id return resp class ContextMiddleware(BaseContextMiddleware): def __init__(self, app): self.policy_enforcer = policy.Enforcer() super(ContextMiddleware, self).__init__(app) def process_request(self, req): """Convert authentication information into a request context Generate a glance.context.RequestContext object from the available authentication headers and store on the 'context' attribute of the req object. :param req: wsgi request object that will be given the context object :raises webob.exc.HTTPUnauthorized: when value of the X-Identity-Status header is not 'Confirmed' and anonymous access is disallowed """ if req.headers.get('X-Identity-Status') == 'Confirmed': req.context = self._get_authenticated_context(req) elif CONF.allow_anonymous_access: req.context = self._get_anonymous_context() else: raise webob.exc.HTTPUnauthorized() def _get_anonymous_context(self): kwargs = { 'user': None, 'tenant': None, 'roles': [], 'is_admin': False, 'read_only': True, 'policy_enforcer': self.policy_enforcer, } return glance.context.RequestContext(**kwargs) def _get_authenticated_context(self, req): service_catalog = None if req.headers.get('X-Service-Catalog') is not None: try: catalog_header = req.headers.get('X-Service-Catalog') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) request_id = req.headers.get('X-Openstack-Request-ID') if request_id and (0 < CONF.max_request_id_length < len(request_id)): msg = (_('x-openstack-request-id is too long, max size %s') % CONF.max_request_id_length) return webob.exc.HTTPRequestHeaderFieldsTooLarge(comment=msg) kwargs = { 'service_catalog': service_catalog, 'policy_enforcer': self.policy_enforcer, 'request_id': request_id, } ctxt = glance.context.RequestContext.from_environ(req.environ, **kwargs) # FIXME(jamielennox): glance has traditionally lowercased its roles. # This was related to bug #1010519 where at least the admin role was # case insensitive. This seems to no longer be the case and should be # fixed. ctxt.roles = [r.lower() for r in ctxt.roles] return ctxt class UnauthenticatedContextMiddleware(BaseContextMiddleware): def process_request(self, req): """Create a context without an authorized user.""" kwargs = { 'user': None, 'tenant': None, 'roles': [], 'is_admin': True, } req.context = glance.context.RequestContext(**kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/middleware/gzip.py0000664000175000017500000000434700000000000020530 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Use gzip compression if the client accepts it. """ import re from oslo_log import log as logging from glance.common import wsgi from glance.i18n import _LI LOG = logging.getLogger(__name__) class GzipMiddleware(wsgi.Middleware): re_zip = re.compile(r'\bgzip\b') def __init__(self, app): LOG.info(_LI("Initialized gzip middleware")) super(GzipMiddleware, self).__init__(app) def process_response(self, response): request = response.request accept_encoding = request.headers.get('Accept-Encoding', '') if self.re_zip.search(accept_encoding): # NOTE(flaper87): Webob removes the content-md5 when # app_iter is called. We'll keep it and reset it later checksum = response.headers.get("Content-MD5") # NOTE(flaper87): We'll use lazy for images so # that they can be compressed without reading # the whole content in memory. Notice that using # lazy will set response's content-length to 0. content_type = response.headers.get("Content-Type", "") lazy = content_type == "application/octet-stream" # NOTE(flaper87): Webob takes care of the compression # process, it will replace the body either with a # compressed body or a generator - used for lazy com # pression - depending on the lazy value. # # Webob itself will set the Content-Encoding header. response.encode_content(lazy=lazy) if checksum: response.headers['Content-MD5'] = checksum return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/middleware/version_negotiation.py0000664000175000017500000001071100000000000023634 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
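# ---------------------------------------------------------------------------
# Editor's illustrative sketch, not Glance code: what the GzipMiddleware
# above keys on. Only a whole-word 'gzip' token in Accept-Encoding triggers
# compression.
def _gzip_negotiation_sketch():
    import re
    re_zip = re.compile(r'\bgzip\b')
    assert re_zip.search('gzip, deflate')               # compressed
    assert re_zip.search('deflate;q=1.0, gzip;q=0.5')   # compressed
    assert not re_zip.search('gzipped')                 # no whole-word match
    assert not re_zip.search('identity')                # left uncompressed
# ---------------------------------------------------------------------------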
""" A filter middleware that inspects the requested URI for a version string and/or Accept headers and attempts to negotiate an API controller to return """ from oslo_config import cfg from oslo_log import log as logging from glance.api import versions from glance.common import wsgi CONF = cfg.CONF LOG = logging.getLogger(__name__) class VersionNegotiationFilter(wsgi.Middleware): def __init__(self, app): self.versions_app = versions.Controller() self.allowed_versions = None self.vnd_mime_type = 'application/vnd.openstack.images-' super(VersionNegotiationFilter, self).__init__(app) def process_request(self, req): """Try to find a version first in the accept header, then the URL""" args = {'method': req.method, 'path': req.path, 'accept': req.accept} LOG.debug("Determining version of request: %(method)s %(path)s " "Accept: %(accept)s", args) # If the request is for /versions, just return the versions container if req.path_info_peek() == "versions": return self.versions_app.index(req, explicit=True) accept = str(req.accept) if accept.startswith(self.vnd_mime_type): LOG.debug("Using media-type versioning") token_loc = len(self.vnd_mime_type) req_version = accept[token_loc:] else: LOG.debug("Using url versioning") # Remove version in url so it doesn't conflict later req_version = self._pop_path_info(req) try: version = self._match_version_string(req_version) except ValueError: LOG.debug("Unknown version. Returning version choices.") return self.versions_app req.environ['api.version'] = version req.path_info = ''.join(('/v', str(version), req.path_info)) LOG.debug("Matched version: v%d", version) LOG.debug('new path %s', req.path_info) return None def _get_allowed_versions(self): allowed_versions = {} allowed_versions['v2'] = 2 allowed_versions['v2.0'] = 2 allowed_versions['v2.1'] = 2 allowed_versions['v2.2'] = 2 allowed_versions['v2.3'] = 2 allowed_versions['v2.4'] = 2 allowed_versions['v2.5'] = 2 allowed_versions['v2.6'] = 2 allowed_versions['v2.7'] = 2 allowed_versions['v2.9'] = 2 if CONF.image_cache_dir: allowed_versions['v2.14'] = 2 allowed_versions['v2.16'] = 2 allowed_versions['v2.15'] = 2 if CONF.enabled_backends: allowed_versions['v2.8'] = 2 allowed_versions['v2.10'] = 2 allowed_versions['v2.11'] = 2 allowed_versions['v2.12'] = 2 allowed_versions['v2.13'] = 2 allowed_versions['v2.17'] = 2 return allowed_versions def _match_version_string(self, subject): """ Given a string, tries to match a major and/or minor version number. :param subject: The string to check :returns: version found in the subject :raises ValueError: if no acceptable version could be found """ if self.allowed_versions is None: self.allowed_versions = self._get_allowed_versions() if subject in self.allowed_versions: return self.allowed_versions[subject] else: raise ValueError() def _pop_path_info(self, req): """ 'Pops' off the next segment of PATH_INFO, returns the popped segment. Do NOT push it onto SCRIPT_NAME. """ path = req.path_info if not path: return None while path.startswith('/'): path = path[1:] idx = path.find('/') if idx == -1: idx = len(path) r = path[:idx] req.path_info = path[idx:] return r ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/policy.py0000664000175000017500000001610600000000000016735 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Glance""" from collections import abc from oslo_config import cfg from oslo_log import log as logging from oslo_policy import opts from oslo_policy import policy from glance.common import exception from glance.domain import proxy from glance import policies LOG = logging.getLogger(__name__) CONF = cfg.CONF _ENFORCER = None # TODO(gmann): Remove overriding the default value of config options # 'policy_file' once oslo_policy changes its default value to what # is overridden here. DEFAULT_POLICY_FILE = 'policy.yaml' opts.set_defaults( cfg.CONF, DEFAULT_POLICY_FILE) class Enforcer(policy.Enforcer): """Responsible for loading and enforcing rules""" def __init__(self, suppress_deprecation_warnings=False): """Init an policy Enforcer. :param suppress_deprecation_warnings: Whether to suppress the deprecation warnings. """ super(Enforcer, self).__init__(CONF, use_conf=True, overwrite=False) # NOTE(gmann): Explicitly disable the warnings for policies # changing their default check_str. For new RBAC, all the policy # defaults have been changed and warning for each policy started # filling the logs limit for various tool. # Once we move to new defaults only world then we can enable these # warning again. self.suppress_default_change_warnings = True if suppress_deprecation_warnings: self.suppress_deprecation_warnings = True self.register_defaults(policies.list_rules()) def add_rules(self, rules): """Add new rules to the Rules object""" self.set_rules(rules, overwrite=False, use_conf=self.use_conf) def enforce(self, context, action, target, registered=True): """Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :raises: `glance.common.exception.Forbidden` :returns: A non-False value if access is allowed. """ if registered and action not in self.registered_rules: raise policy.PolicyNotRegistered(action) try: return super(Enforcer, self).enforce(action, target, context, do_raise=True, exc=exception.Forbidden, action=action) except policy.InvalidScope: raise exception.Forbidden(action=action) def check(self, context, action, target, registered=True): """Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :returns: A non-False value if access is allowed. """ if registered and action not in self.registered_rules: raise policy.PolicyNotRegistered(action) return super(Enforcer, self).enforce(action, target, context) def check_is_admin(self, context): """Check if the given context is associated with an admin role, as defined via the 'context_is_admin' RBAC rule. :param context: Glance request context :returns: A non-False value if context role is admin. 
""" return self.check(context, 'context_is_admin', context.to_dict()) def get_enforcer(): CONF([], project='glance') global _ENFORCER if _ENFORCER is None: _ENFORCER = Enforcer() return _ENFORCER def _enforce_image_visibility(policy, context, visibility, target): if visibility == 'public': policy.enforce(context, 'publicize_image', target) elif visibility == 'community': policy.enforce(context, 'communitize_image', target) class ImageTarget(abc.Mapping): SENTINEL = object() def __init__(self, target): """Initialize the object :param target: Object being targeted """ self.target = target self._target_keys = [k for k in dir(proxy.Image) if not k.startswith('__') # NOTE(lbragstad): The locations attributes is an # instance of ImageLocationsProxy, which isn't # serialized into anything oslo.policy can use. If # we need to use locations in policies, we need to # modify how we represent those location objects # before we call enforcement with target # information. Omitting for not until that is a # necessity. if not k == 'locations' if not callable(getattr(proxy.Image, k))] def __getitem__(self, key): """Return the value of 'key' from the target. If the target has the attribute 'key', return it. :param key: value to retrieve """ key = self.key_transforms(key) value = getattr(self.target, key, self.SENTINEL) if value is self.SENTINEL: extra_properties = getattr(self.target, 'extra_properties', None) if extra_properties is not None: value = extra_properties[key] else: value = None return value def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def __len__(self): length = len(self._target_keys) length += len(getattr(self.target, 'extra_properties', {})) return length def __iter__(self): for key in self._target_keys: yield key for key in getattr(self.target, 'extra_properties', {}).keys(): yield key for alias in ['project_id']: yield alias def key_transforms(self, key): transforms = { 'id': 'image_id', 'project_id': 'owner', 'member_id': 'member', } return transforms.get(key, key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/property_protections.py0000664000175000017500000001175000000000000021753 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.common import exception import glance.domain.proxy class ProtectedImageFactoryProxy(glance.domain.proxy.ImageFactory): def __init__(self, image_factory, context, property_rules): self.image_factory = image_factory self.context = context self.property_rules = property_rules kwargs = {'context': self.context, 'property_rules': self.property_rules} super(ProtectedImageFactoryProxy, self).__init__( image_factory, proxy_class=ProtectedImageProxy, proxy_kwargs=kwargs) def new_image(self, **kwargs): extra_props = kwargs.pop('extra_properties', {}) extra_properties = {} for key in extra_props.keys(): if self.property_rules.check_property_rules(key, 'create', self.context): extra_properties[key] = extra_props[key] else: raise exception.ReservedProperty(property=key) return super(ProtectedImageFactoryProxy, self).new_image( extra_properties=extra_properties, **kwargs) class ProtectedImageRepoProxy(glance.domain.proxy.Repo): def __init__(self, image_repo, context, property_rules): self.context = context self.image_repo = image_repo self.property_rules = property_rules proxy_kwargs = {'context': self.context} super(ProtectedImageRepoProxy, self).__init__( image_repo, item_proxy_class=ProtectedImageProxy, item_proxy_kwargs=proxy_kwargs) def get(self, image_id): return ProtectedImageProxy(self.image_repo.get(image_id), self.context, self.property_rules) def list(self, *args, **kwargs): images = self.image_repo.list(*args, **kwargs) return [ProtectedImageProxy(image, self.context, self.property_rules) for image in images] class ProtectedImageProxy(glance.domain.proxy.Image): def __init__(self, image, context, property_rules): self.image = image self.context = context self.property_rules = property_rules self.image.extra_properties = ExtraPropertiesProxy( self.context, self.image.extra_properties, self.property_rules) super(ProtectedImageProxy, self).__init__(self.image) class ExtraPropertiesProxy(glance.domain.ExtraProperties): def __init__(self, context, extra_props, property_rules): self.context = context self.property_rules = property_rules extra_properties = {} for key in extra_props.keys(): if self.property_rules.check_property_rules(key, 'read', self.context): extra_properties[key] = extra_props[key] super(ExtraPropertiesProxy, self).__init__(extra_properties) def __getitem__(self, key): if self.property_rules.check_property_rules(key, 'read', self.context): return dict.__getitem__(self, key) else: raise KeyError def __setitem__(self, key, value): # NOTE(isethi): Exceptions are raised only for actions update, delete # and create, where the user proactively interacts with the properties. 
# A user cannot request to read a specific property, hence reads do # raise an exception try: if self.__getitem__(key) is not None: if self.property_rules.check_property_rules(key, 'update', self.context): return dict.__setitem__(self, key, value) else: raise exception.ReservedProperty(property=key) except KeyError: if self.property_rules.check_property_rules(key, 'create', self.context): return dict.__setitem__(self, key, value) else: raise exception.ReservedProperty(property=key) def __delitem__(self, key): if key not in super(ExtraPropertiesProxy, self).keys(): raise KeyError if self.property_rules.check_property_rules(key, 'delete', self.context): return dict.__delitem__(self, key) else: raise exception.ReservedProperty(property=key) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8463016 glance-29.0.0/glance/api/v1/0000775000175000017500000000000000000000000015406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v1/__init__.py0000664000175000017500000000000000000000000017505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v1/router.py0000664000175000017500000000203500000000000017300 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common import wsgi def init(mapper): reject_resource = wsgi.Resource(wsgi.RejectMethodController()) mapper.connect("/v1", controller=reject_resource, action="reject") class API(wsgi.Router): """WSGI entry point for satisfy grenade.""" def __init__(self, mapper): mapper = mapper or wsgi.APIMapper() init(mapper) super(API, self).__init__(mapper) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8463016 glance-29.0.0/glance/api/v2/0000775000175000017500000000000000000000000015407 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/__init__.py0000664000175000017500000000000000000000000017506 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/cached_images.py0000664000175000017500000002401500000000000020517 0ustar00zuulzuul00000000000000# Copyright 2018 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Controller for Image Cache Management API """ import queue import threading import glance_store from oslo_config import cfg from oslo_log import log as logging import webob.exc from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ from glance import image_cache import glance.notifier CONF = cfg.CONF LOG = logging.getLogger(__name__) WORKER = None class CacheController(object): """ A controller for managing cached images. """ def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): global WORKER if not CONF.image_cache_dir: self.cache = None else: self.cache = image_cache.ImageCache() self.policy = policy_enforcer or policy.Enforcer() self.db_api = db_api or glance.db.get_api() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) # Initialize the worker only if cache is enabled if CONF.image_cache_dir and not WORKER: # If we're the first, start the thread WORKER = CacheWorker() WORKER.start() LOG.debug('Started cache worker thread') def _enforce(self, req, image=None, new_policy=None): """Authorize request against given policy""" if not new_policy: new_policy = 'manage_image_cache' try: api_policy.CacheImageAPIPolicy( req.context, image=image, enforcer=self.policy, policy_str=new_policy).manage_image_cache() except exception.Forbidden: LOG.debug("User not permitted by '%s' policy", new_policy) raise webob.exc.HTTPForbidden() if not CONF.image_cache_dir: msg = _("Caching via API is not supported at this site.") raise webob.exc.HTTPNotFound(explanation=msg) def get_cached_images(self, req): """ GET /cached_images Returns a mapping of records about cached images. """ self._enforce(req) images = self.cache.get_cached_images() return dict(cached_images=images) def delete_cached_image(self, req, image_id): """ DELETE /cached_images/ Removes an image from the cache. """ self._enforce(req) self.cache.delete_cached_image(image_id) def delete_cached_images(self, req): """ DELETE /cached_images - Clear all active cached images Removes all images from the cache. """ self._enforce(req) return dict(num_deleted=self.cache.delete_all_cached_images()) def get_queued_images(self, req): """ GET /queued_images Returns a mapping of records about queued images. """ self._enforce(req) images = self.cache.get_queued_images() return dict(queued_images=images) def queue_image(self, req, image_id): """ PUT /queued_images/ Queues an image for caching. We do not check to see if the image is in the registry here. That is done by the prefetcher... """ self._enforce(req) self.cache.queue_image(image_id) def delete_queued_image(self, req, image_id): """ DELETE /queued_images/ Removes an image from the cache. """ self._enforce(req) self.cache.delete_queued_image(image_id) def delete_queued_images(self, req): """ DELETE /queued_images - Clear all active queued images Removes all images from the cache. """ self._enforce(req) return dict(num_deleted=self.cache.delete_all_queued_images()) def delete_cache_entry(self, req, image_id): """ DELETE /cache/ - Remove image from cache Removes the image from cache or queue. 
""" image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) except exception.NotFound: # We are going to raise this error only if image is # not present in cache or queue list image = None if not self.image_exists_in_cache(image_id): msg = _("Image %s not found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) self._enforce(req, new_policy='cache_delete', image=image) self.cache.delete_cached_image(image_id) self.cache.delete_queued_image(image_id) def image_exists_in_cache(self, image_id): queued_images = self.cache.get_queued_images() if image_id in queued_images: return True cached_images = self.cache.get_cached_images() if image_id in [image['image_id'] for image in cached_images]: return True return False def clear_cache(self, req): """ DELETE /cache - Clear cache and queue Removes all images from cache and queue. """ self._enforce(req, new_policy='cache_delete') target = req.headers.get('x-image-cache-clear-target', '').lower() if target == '': res = dict(cache_deleted=self.cache.delete_all_cached_images(), queue_deleted=self.cache.delete_all_queued_images()) elif target == 'cache': res = dict(cache_deleted=self.cache.delete_all_cached_images()) elif target == 'queue': res = dict(queue_deleted=self.cache.delete_all_queued_images()) else: reason = (_("If provided 'x-image-cache-clear-target' must be " "'cache', 'queue' or empty string.")) raise webob.exc.HTTPBadRequest(explanation=reason, request=req, content_type='text/plain') return res def get_cache_state(self, req): """ GET /cache/ - Get currently cached and queued images Returns dict of cached and queued images """ self._enforce(req, new_policy='cache_list') return dict(cached_images=self.cache.get_cached_images(), queued_images=self.cache.get_queued_images()) def queue_image_from_api(self, req, image_id): """ PUT /cache/ Queues an image for caching. We do not check to see if the image is in the registry here. That is done by the prefetcher... """ image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) except exception.NotFound: msg = _("Image %s not found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) self._enforce(req, new_policy='cache_image', image=image) if image.status != 'active': msg = _("Only images with status active can be targeted for " "queueing") raise webob.exc.HTTPBadRequest(explanation=msg) self.cache.queue_image(image_id) WORKER.submit(image_id) class CacheWorker(threading.Thread): EXIT_SENTINEL = object() def __init__(self, *args, **kwargs): self.q = queue.Queue(maxsize=-1) # NOTE(abhishekk): Importing the prefetcher just in time to avoid # import loop during initialization from glance.image_cache import prefetcher # noqa self.prefetcher = prefetcher.Prefetcher() super().__init__(*args, **kwargs) # NOTE(abhishekk): Setting daemon to True because if `atexit` event # handler is not called due to some reason the main process will # not hang for the thread which will never exit. self.daemon = True def submit(self, job): self.q.put(job) def terminate(self): # NOTE(danms): Make the API workers call this before we exit # to make sure any cache operations finish. 
LOG.info('Signaling cache worker thread to exit') self.q.put(self.EXIT_SENTINEL) self.join() LOG.info('Cache worker thread exited') def run(self): while True: task = self.q.get() if task == self.EXIT_SENTINEL: LOG.debug("CacheWorker thread exiting") break LOG.debug("Processing image '%s' for caching", task) self.prefetcher.fetch_image_into_cache(task) # mark the queue item as processed self.q.task_done() LOG.debug("Caching of image '%s' is complete", task) class CachedImageDeserializer(wsgi.JSONRequestDeserializer): pass class CachedImageSerializer(wsgi.JSONResponseSerializer): def queue_image_from_api(self, response, result): response.status_int = 202 def clear_cache(self, response, result): response.status_int = 204 def delete_cache_entry(self, response, result): response.status_int = 204 def create_resource(): """Cached Images resource factory method""" deserializer = CachedImageDeserializer() serializer = CachedImageSerializer() return wsgi.Resource(CacheController(), deserializer, serializer) glance-29.0.0/glance/api/v2/discovery.py # Copyright (c) 2017 RedHat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import glance_store as g_store from oslo_config import cfg from oslo_log import log as logging import oslo_serialization.jsonutils as json import webob.exc from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import wsgi import glance.db from glance.i18n import _, _LW from glance.quota import keystone as ks_quota CONF = cfg.CONF LOG = logging.getLogger(__name__) class InfoController(object): def __init__(self, policy_enforcer=None): self.policy = policy_enforcer or policy.Enforcer() def get_image_import(self, req): # TODO(jokke): All the rest of the boundaries should be implemented. import_methods = { 'description': 'Import methods available.', 'type': 'array', 'value': CONF.get('enabled_import_methods') } return { 'import-methods': import_methods } def get_stores(self, req): # TODO(abhishekk): This will be removed after config options # 'stores' and 'default_store' are removed. enabled_backends = CONF.enabled_backends if not enabled_backends: msg = _("Multi backend is not supported at this site.") raise webob.exc.HTTPNotFound(explanation=msg) backends = [] for backend in enabled_backends: if backend.startswith("os_glance_"): continue stores = {} if enabled_backends[backend] == 'swift': conf_file = getattr(CONF, backend).swift_store_config_file multitenant = getattr(CONF, backend).swift_store_multi_tenant if multitenant and conf_file: msg = ("The config options 'swift_store_multi_tenant' " "and 'swift_store_config_file' are mutually " "exclusive.
If you intend to use multi-tenant " "swift store, please make sure that you have " "not set a swift configuration file with the " "'swift_store_config_file' option. " "Excluding `%s:%s` store details from the " "response as it's not configured correctly." % (backend, enabled_backends[backend])) LOG.warning(_LW(msg)) continue stores['id'] = backend description = getattr(CONF, backend).store_description if description: stores['description'] = description if backend == CONF.glance_store.default_backend: stores['default'] = "true" # If an http store is configured, mark it as read-only if enabled_backends[backend] == 'http': stores['read-only'] = "true" backends.append(stores) return {'stores': backends} @staticmethod def _get_rbd_properties(store_detail): return { 'chunk_size': store_detail.chunk_size, 'pool': store_detail.pool, 'thin_provisioning': store_detail.thin_provisioning } @staticmethod def _get_file_properties(store_detail): return { 'data_dir': store_detail.datadir, 'chunk_size': store_detail.chunk_size, 'thin_provisioning': store_detail.thin_provisioning } @staticmethod def _get_cinder_properties(store_detail): return { 'volume_type': store_detail.store_conf.cinder_volume_type, 'use_multipath': store_detail.store_conf.cinder_use_multipath } @staticmethod def _get_swift_properties(store_detail): return { 'container': getattr(store_detail, 'container', None), 'large_object_size': store_detail.large_object_size, 'large_object_chunk_size': store_detail.large_object_chunk_size } @staticmethod def _get_s3_properties(store_detail): return { 's3_store_large_object_size': store_detail.s3_store_large_object_size, 's3_store_large_object_chunk_size': store_detail.s3_store_large_object_chunk_size, 's3_store_thread_pools': store_detail.s3_store_thread_pools } @staticmethod def _get_http_properties(store_detail): # NOTE(mrjoshi): There are no useful properties # to be exposed.
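        # NOTE(editor): For illustration only (the identifiers are
        # hypothetical), an 'http' backend entry in the
        # GET /v2/info/stores/detail response therefore ends up with an
        # empty properties object, e.g.:
        #
        #     {"id": "http_store", "type": "http", "weight": 0,
        #      "read-only": "true", "properties": {}}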
return {} def get_stores_detail(self, req): enabled_backends = CONF.enabled_backends stores = self.get_stores(req).get('stores') try: api_policy.DiscoveryAPIPolicy( req.context, enforcer=self.policy).stores_info_detail() store_mapper = { 'rbd': self._get_rbd_properties, 'file': self._get_file_properties, 'cinder': self._get_cinder_properties, 'swift': self._get_swift_properties, 's3': self._get_s3_properties, 'http': self._get_http_properties } for store in stores: store_type = enabled_backends[store['id']] store['type'] = store_type store_detail = g_store.get_store_from_store_identifier( store['id']) store['properties'] = store_mapper.get(store_type)( store_detail) store['weight'] = getattr(CONF, store['id']).weight except exception.Forbidden as e: LOG.debug("User not permitted to view details") raise webob.exc.HTTPForbidden(explanation=e.msg) return {'stores': stores} def get_usage(self, req): project_usage = ks_quota.get_usage(req.context) return {'usage': {name: {'usage': usage.usage, 'limit': usage.limit} for name, usage in project_usage.items()}} class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, usage_schema=None): super(ResponseSerializer, self).__init__() self.schema = usage_schema or get_usage_schema() def get_usage(self, response, usage): body = json.dumps(self.schema.filter(usage), ensure_ascii=False) response.unicode_body = str(body) response.content_type = 'application/json' _USAGE_SCHEMA = { 'usage': { 'type': 'array', 'items': { 'type': 'object', 'additionalProperties': True, 'validation_data': { 'type': 'object', 'additionalProperties': False, 'properties': { 'usage': {'type': 'integer'}, 'limit': {'type': 'integer'}, }, }, }, }, } def get_usage_schema(): return glance.schema.Schema('usage', copy.deepcopy(_USAGE_SCHEMA)) def create_resource(): usage_schema = get_usage_schema() serializer = ResponseSerializer(usage_schema) return wsgi.Resource(InfoController(), None, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/image_actions.py0000664000175000017500000001123300000000000020563 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client as http import glance_store from oslo_log import log as logging import webob.exc from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _LI import glance.notifier LOG = logging.getLogger(__name__) class ImageActionsController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @utils.mutating def deactivate(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: # FIXME(danms): This will still enforce the get_image policy # which we don't want image = image_repo.get(image_id) # NOTE(abhishekk): This is the right place to check whether the # user has permission to deactivate the image and remove the # policy check later from the policy layer. api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.deactivate_image() status = image.status image.deactivate() # not necessary to change the status if it's already 'deactivated' if status == 'active': image_repo.save(image, from_state='active') LOG.info(_LI("Image %s is deactivated"), image_id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to deactivate image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) @utils.mutating def reactivate(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: # FIXME(danms): This will still enforce the get_image policy # which we don't want image = image_repo.get(image_id) # NOTE(abhishekk): This is the right place to check whether the # user has permission to reactivate the image and remove the # policy check later from the policy layer.
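            # NOTE(editor): Illustrative client calls for these two action
            # handlers; host, port and token handling are assumptions, not
            # taken from this module:
            #
            #     import requests
            #
            #     base = ('http://glance.example.com:9292/v2/images/'
            #             '%s/actions' % image_id)
            #     requests.post(base + '/deactivate',
            #                   headers={'X-Auth-Token': token})
            #     requests.post(base + '/reactivate',
            #                   headers={'X-Auth-Token': token})
            #
            # Both return 204 No Content on success, as set by the
            # ResponseSerializer below.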
api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.reactivate_image() status = image.status image.reactivate() # not necessary to change the status if it's already 'active' if status == 'deactivated': image_repo.save(image, from_state='deactivated') LOG.info(_LI("Image %s is reactivated"), image_id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to reactivate image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) class ResponseSerializer(wsgi.JSONResponseSerializer): def deactivate(self, response, result): response.status_int = http.NO_CONTENT def reactivate(self, response, result): response.status_int = http.NO_CONTENT def create_resource(): """Image data resource factory method""" deserializer = None serializer = ResponseSerializer() controller = ImageActionsController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/image_data.py0000664000175000017500000006264300000000000020047 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from cursive import exception as cursive_exception import glance_store from glance_store import backend from glance_store import location from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import webob.exc import glance.api.policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import trust_auth from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _, _LE, _LI import glance.notifier from glance.quota import keystone as ks_quota LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('public_endpoint', 'glance.api.versions') class ImageDataController(object): def __init__(self, db_api=None, store_api=None, policy_enforcer=None, notifier=None): db_api = db_api or glance.db.get_api() store_api = store_api or glance_store notifier = notifier or glance.notifier.Notifier() self.policy = policy_enforcer or glance.api.policy.Enforcer() self.gateway = glance.gateway.Gateway(db_api, store_api, notifier, self.policy) def _restore(self, image_repo, image): """ Restore the image to queued status. 
:param image_repo: The instance of ImageRepo :param image: The image to be restored """ try: if image_repo and image: image.status = 'queued' image_repo.save(image) except Exception as e: msg = (_LE("Unable to restore image %(image_id)s: %(e)s") % {'image_id': image.image_id, 'e': encodeutils.exception_to_unicode(e)}) LOG.exception(msg) def _unstage(self, image_repo, image, staging_store): """ Restore the image to queued status and remove data from staging. :param image_repo: The instance of ImageRepo :param image: The image to be restored :param staging_store: The store used for staging """ if CONF.enabled_backends: file_path = "%s/%s" % (getattr( CONF, 'os_glance_staging_store').filesystem_store_datadir, image.image_id) try: loc = location.get_location_from_uri_and_backend( file_path, 'os_glance_staging_store') staging_store.delete(loc) except (glance_store.exceptions.NotFound, glance_store.exceptions.UnknownScheme): pass else: file_path = str(CONF.node_staging_uri + '/' + image.image_id)[7:] if os.path.exists(file_path): try: os.unlink(file_path) except OSError as e: LOG.error(_("Cannot delete staged image data %(fn)s " "[Errno %(en)d]"), {'fn': file_path, 'en': e.errno}) else: LOG.warning(_("Staged image data not found " "at %(fn)s"), {'fn': file_path}) self._restore(image_repo, image) @utils.mutating def upload(self, req, image_id, data, size): try: ks_quota.enforce_image_size_total(req.context, req.context.owner) except exception.LimitExceeded as e: raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e), request=req) backend = None if CONF.enabled_backends: backend = req.headers.get('x-image-meta-store', CONF.glance_store.default_backend) try: glance_store.get_store_from_store_identifier(backend) except glance_store.UnknownScheme as exc: raise webob.exc.HTTPBadRequest(explanation=exc.msg, request=req, content_type='text/plain') image_repo = self.gateway.get_repo(req.context) image = None refresher = None cxt = req.context try: image = image_repo.get(image_id) # NOTE(abhishekk): This is the right place to check whether the # user has permission to upload the image and remove the policy # check later from the policy layer.
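            # NOTE(editor): Illustrative client call for this handler; host,
            # port and token handling are assumptions, not taken from this
            # module. The RequestDeserializer below requires an
            # application/octet-stream body, and with multiple backends
            # enabled an 'X-Image-Meta-Store' header selects the target
            # store (see the 'backend' handling above):
            #
            #     import requests
            #
            #     with open('disk.qcow2', 'rb') as f:
            #         requests.put(
            #             'http://glance.example.com:9292/v2/images/'
            #             '%s/file' % image_id,
            #             headers={'X-Auth-Token': token,
            #                      'Content-Type':
            #                          'application/octet-stream',
            #                      # optional, multi-store only:
            #                      'X-Image-Meta-Store': 'fast-store'},
            #             data=f)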
api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.upload_image() image.status = 'saving' try: # create a trust if backend is registry try: # request user plugin for current token user_plugin = req.environ.get('keystone.token_auth') roles = [] # use roles from request environment because they # are not transformed to lower-case unlike cxt.roles for role_info in req.environ.get( 'keystone.token_info')['token']['roles']: roles.append(role_info['name']) refresher = trust_auth.TokenRefresher(user_plugin, cxt.project_id, roles) except Exception as e: LOG.info(_LI("Unable to create trust: %s " "Use the existing user token."), encodeutils.exception_to_unicode(e)) image_repo.save(image, from_state='queued') ks_quota.enforce_image_count_uploading(req.context, req.context.owner) image.set_data(data, size, backend=backend) try: image_repo.save(image, from_state='saving') except exception.NotAuthenticated: if refresher is not None: # request a new token to update an image in database cxt.auth_token = refresher.refresh_token() image_repo.save(image, from_state='saving') else: raise try: # release resources required for re-auth if refresher is not None: refresher.release_resources() except Exception as e: LOG.info(_LI("Unable to delete trust %(trust)s: %(msg)s"), {"trust": refresher.trust_id, "msg": encodeutils.exception_to_unicode(e)}) except (glance_store.NotFound, exception.ImageNotFound, exception.Conflict): msg = (_("Image %s could not be found after upload. " "The image may have been deleted during the " "upload, cleaning up the chunks uploaded.") % image_id) LOG.warning(msg) # NOTE(sridevi): Cleaning up the uploaded chunks. try: image.delete() except exception.ImageNotFound: # NOTE(sridevi): Ignore this exception pass raise webob.exc.HTTPGone(explanation=msg, request=req, content_type='text/plain') except exception.NotAuthenticated: msg = (_("Authentication error - the token may have " "expired during file upload. Deleting image data for " "%s.") % image_id) LOG.debug(msg) try: image.delete() except exception.NotAuthenticated: # NOTE: Ignore this exception pass raise webob.exc.HTTPUnauthorized(explanation=msg, request=req, content_type='text/plain') except ValueError as e: LOG.debug("Cannot save data for image %(id)s: %(e)s", {'id': image_id, 'e': encodeutils.exception_to_unicode(e)}) self._restore(image_repo, image) raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) except glance_store.StoreAddDisabled: msg = _("Error in store configuration. 
Adding images to store " "is disabled.") LOG.exception(msg) self._restore(image_repo, image) raise webob.exc.HTTPGone(explanation=msg, request=req, content_type='text/plain') except exception.InvalidImageStatusTransition as e: msg = encodeutils.exception_to_unicode(e) LOG.exception(msg) raise webob.exc.HTTPConflict(explanation=e.msg, request=req) except exception.Forbidden: msg = ("Not allowed to upload image data for image %s" % image_id) LOG.debug(msg) raise webob.exc.HTTPForbidden(explanation=msg, request=req) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except glance_store.StorageFull as e: msg = _("Image storage media " "is full: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.StorageQuotaFull as e: msg = _("Image exceeds the storage " "quota: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.ImageSizeLimitExceeded as e: msg = _("The incoming image is " "too large: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.LimitExceeded as e: LOG.error(str(e)) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e), request=req) except glance_store.StorageWriteDenied as e: msg = _("Insufficient permissions on image " "storage media: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPServiceUnavailable(explanation=msg, request=req) except cursive_exception.SignatureVerificationError as e: msg = (_LE("Signature verification failed for image %(id)s: %(e)s") % {'id': image_id, 'e': encodeutils.exception_to_unicode(e)}) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPBadRequest(explanation=msg) except webob.exc.HTTPGone: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload image data due to HTTP error")) except webob.exc.HTTPError: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload image data due to HTTP error")) self._restore(image_repo, image) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload image data due to " "internal error")) self._restore(image_repo, image) @utils.mutating def stage(self, req, image_id, data, size): try: ks_quota.enforce_image_staging_total(req.context, req.context.owner) except exception.LimitExceeded as e: raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e), request=req) image_repo = self.gateway.get_repo(req.context) # NOTE(abhishekk): the stage API call does not have its own policy, # but it requires get_image access, so this is the right place to # check whether the user has access to the image try: image = image_repo.get(image_id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) api_pol = api_policy.ImageAPIPolicy(req.context, image, enforcer=self.policy) try: api_pol.modify_image() except exception.Forbidden as e: # NOTE(abhishekk): This will throw Forbidden if S-RBAC is not # enabled raise webob.exc.HTTPForbidden(explanation=e.msg) # NOTE(jokke): this is a horrible way to do it, but as long as # glance_store is in the shape it is, it is the only way. Don't hold # me accountable for it.
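        # NOTE(editor): Illustrative example (the path is hypothetical) of
        # the URI-to-path translation performed in _build_staging_store()
        # below: with node_staging_uri set to
        # 'file:///var/lib/glance/staging', CONF.node_staging_uri[7:]
        # strips the leading 'file://' and yields
        # '/var/lib/glance/staging', which is then used as
        # filesystem_store_datadir for the ad-hoc staging store.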
# TODO(abhishekk): After removal of backend module from glance_store # need to change this to use multi_backend module. def _build_staging_store(): conf = cfg.ConfigOpts() try: backend.register_opts(conf) except cfg.DuplicateOptError: pass conf.set_override('filesystem_store_datadir', CONF.node_staging_uri[7:], group='glance_store') staging_store = backend._load_store(conf, 'file') try: staging_store.configure() except AttributeError: msg = _("'node_staging_uri' is not set correctly. Could not " "load staging store.") raise exception.BadStoreUri(message=msg) return staging_store # NOTE(abhishekk): Use reserved 'os_glance_staging_store' for staging # the data, the else part will be removed once multiple backend feature # is declared as stable. if CONF.enabled_backends: staging_store = glance_store.get_store_from_store_identifier( 'os_glance_staging_store') else: staging_store = _build_staging_store() try: image.status = 'uploading' image_repo.save(image, from_state='queued') ks_quota.enforce_image_count_uploading(req.context, req.context.owner) try: uri, size, id, store_info = staging_store.add( image_id, utils.LimitingReader( utils.CooperativeReader(data), CONF.image_size_cap), 0) image.size = size except glance_store.Duplicate: msg = _("The image %s has data on staging") % image_id raise webob.exc.HTTPConflict(explanation=msg) # NOTE(danms): Record this worker's # worker_self_reference_url in the image metadata so we # know who has the staging data. self_url = CONF.worker_self_reference_url or CONF.public_endpoint if self_url: image.extra_properties['os_glance_stage_host'] = self_url image_repo.save(image, from_state='uploading') except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except glance_store.StorageFull as e: msg = _("Image storage media " "is full: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._unstage(image_repo, image, staging_store) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.StorageQuotaFull as e: msg = _("Image exceeds the storage " "quota: %s") % encodeutils.exception_to_unicode(e) LOG.debug(msg) self._unstage(image_repo, image, staging_store) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.ImageSizeLimitExceeded as e: msg = _("The incoming image is " "too large: %s") % encodeutils.exception_to_unicode(e) LOG.debug(msg) self._unstage(image_repo, image, staging_store) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.LimitExceeded as e: LOG.debug(str(e)) self._unstage(image_repo, image, staging_store) raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e), request=req) except glance_store.StorageWriteDenied as e: msg = _("Insufficient permissions on image " "storage media: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._unstage(image_repo, image, staging_store) raise webob.exc.HTTPServiceUnavailable(explanation=msg, request=req) except exception.InvalidImageStatusTransition as e: msg = encodeutils.exception_to_unicode(e) LOG.debug(msg) raise webob.exc.HTTPConflict(explanation=e.msg, request=req) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to stage image data due to " "internal error")) self._restore(image_repo, image) def download(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) if image.status == 'deactivated' and not req.context.is_admin: msg = _('The requested image has been 
deactivated. ' 'Image data download is forbidden.') raise exception.Forbidden(message=msg) # NOTE(abhishekk): This is the right place to verify whether the # user has permission to download the image or not. api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.download_image() except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to download image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) return image class RequestDeserializer(wsgi.JSONRequestDeserializer): def upload(self, request): try: request.get_content_type(('application/octet-stream',)) except exception.InvalidContentType as e: raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg) if self.is_valid_encoding(request) and self.is_valid_method(request): request.is_body_readable = True image_size = request.content_length or None return {'size': image_size, 'data': request.body_file} def stage(self, request): if "glance-direct" not in CONF.enabled_import_methods: msg = _("'glance-direct' method is not available at this site.") raise webob.exc.HTTPNotFound(explanation=msg) try: request.get_content_type(('application/octet-stream',)) except exception.InvalidContentType as e: raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg) if self.is_valid_encoding(request) and self.is_valid_method(request): request.is_body_readable = True image_size = request.content_length or None return {'size': image_size, 'data': request.body_file} class ResponseSerializer(wsgi.JSONResponseSerializer): def download(self, response, image): offset, chunk_size = 0, None # NOTE(dharinic): In case of a malformed range header, # glance/common/wsgi.py will raise HTTPRequestRangeNotSatisfiable # (setting status_code to 416) range_val = response.request.get_range_from_request(image.size) if range_val: if isinstance(range_val, webob.byterange.Range): response_end = image.size - 1 # NOTE(dharinic): webob parsing is zero-indexed. # i.e., to download the first 5 bytes of a 10-byte image, # the request should be "bytes=0-4" and the response would # be "bytes 0-4/10". # A validated Range will never have 'start' set to None. if range_val.start >= 0: offset = range_val.start else: # NOTE(dharinic): Negative start values need to be # processed to allow suffix-length for Range requests # like "bytes=-2", as per RFC 7233. if abs(range_val.start) < image.size: offset = image.size + range_val.start if range_val.end is not None and range_val.end < image.size: chunk_size = range_val.end - offset response_end = range_val.end - 1 else: chunk_size = image.size - offset # NOTE(dharinic): For backward compatibility reasons, we maintain # support for 'Content-Range' in requests even though it's not # correct to use it in requests. elif isinstance(range_val, webob.byterange.ContentRange): response_end = range_val.stop - 1 # NOTE(flaper87): if not present, both, start # and stop, will be None. offset = range_val.start chunk_size = range_val.stop - offset response.status_int = 206 response.headers['Content-Type'] = 'application/octet-stream' try: # NOTE(markwash): filesystem store (and maybe others?) cause a # problem with the caching middleware if they are not wrapped in # an iterator; very strange response.app_iter = iter(image.get_data(offset=offset, chunk_size=chunk_size)) # NOTE(dharinic): In case of a full image download, when # chunk_size was None, reset it to image.size to set the # response header's Content-Length.
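            # NOTE(editor): Worked example of the range arithmetic earlier
            # in this method, for a 10-byte image; host, port and token
            # handling are assumptions:
            #
            #     import requests
            #
            #     r = requests.get(
            #         'http://glance.example.com:9292/v2/images/%s/file'
            #         % image_id,
            #         headers={'X-Auth-Token': token,
            #                  'Range': 'bytes=0-4'})
            #
            # webob parses 'bytes=0-4' as start=0, end=5 (end-exclusive),
            # so offset=0, chunk_size=5 and response_end=4; the reply is a
            # 206 with 'Content-Range: bytes 0-4/10' and Content-Length 5.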
if chunk_size is not None: response.headers['Content-Range'] = 'bytes %s-%s/%s'\ % (offset, response_end, image.size) else: chunk_size = image.size except glance_store.NotFound as e: raise webob.exc.HTTPNoContent(explanation=e.msg) except glance_store.RemoteServiceUnavailable as e: raise webob.exc.HTTPServiceUnavailable(explanation=e.msg) except (glance_store.StoreGetNotSupported, glance_store.StoreRandomGetNotSupported) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to download image '%s'", image) raise webob.exc.HTTPForbidden(explanation=e.msg) # NOTE(saschpe): "response.app_iter = ..." currently resets Content-MD5 # (https://github.com/Pylons/webob/issues/86), so it should be set # afterwards for the time being. if image.checksum: response.headers['Content-MD5'] = image.checksum # NOTE(markwash): "response.app_iter = ..." also erroneously resets the # content-length response.headers['Content-Length'] = str(chunk_size) def upload(self, response, result): response.status_int = 204 def stage(self, response, result): response.status_int = 204 def create_resource(): """Image data resource factory method""" deserializer = RequestDeserializer() serializer = ResponseSerializer() controller = ImageDataController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/image_members.py0000664000175000017500000004606100000000000020564 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import http.client as http import glance_store from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import webob from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import timeutils from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF class ImageMembersController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) def _get_member_repo(self, req, image): try: return self.gateway.get_member_repo(image, req.context) except exception.Forbidden as e: msg = (_("Error fetching members of image %(image_id)s: " "%(inner_msg)s") % {"image_id": image.image_id, "inner_msg": e.msg}) LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) def _lookup_image(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: return image_repo.get(image_id) except exception.NotFound: msg = _("Image %s not found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden: msg = _("You are not authorized to lookup image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) def _check_visibility_and_ownership(self, context, image, ownership_check=None): if image.visibility != 'shared': message = _("Only shared images have members.") raise exception.Forbidden(message) # NOTE(abhishekk): The ownership check only needs to be performed # while adding new members to an image owner = image.owner if not (CONF.oslo_policy.enforce_new_defaults or CONF.oslo_policy.enforce_scope) and not context.is_admin: if ownership_check == 'create': if owner is None or owner != context.owner: message = _("You are not permitted to create image " "members for the image.") raise exception.Forbidden(message) elif ownership_check == 'update': if context.owner == owner: message = _("You are not permitted to modify 'status' " "on this image member.") raise exception.Forbidden(message) elif ownership_check == 'delete': if context.owner != owner: message = _("You cannot delete image member.") raise exception.Forbidden(message) def _lookup_member(self, req, image, member_id, member_repo=None): if not member_repo: member_repo = self._get_member_repo(req, image) try: # NOTE(abhishekk): This will verify whether the user has # permission to view the image member or not. api_policy.MemberAPIPolicy( req.context, image, enforcer=self.policy).get_member() return member_repo.get(member_id) except (exception.NotFound): msg = (_("%(m_id)s not found in the member list of the image " "%(i_id)s.") % {"m_id": member_id, "i_id": image.image_id}) LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden: msg = (_("You are not authorized to lookup the members of the " "image %s.") % image.image_id) LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) @utils.mutating def create(self, req, image_id, member_id): """ Adds a membership to the image.
:param req: the Request object coming from the wsgi layer :param image_id: the image identifier :param member_id: the member identifier :returns: The response body is a mapping of the following form :: {'member_id': <MEMBER_ID>, 'image_id': <IMAGE_ID>, 'status': <MEMBER_STATUS>, 'created_at': .., 'updated_at': ..} """ try: image = self._lookup_image(req, image_id) # Check for image visibility and ownership before getting member # repo # NOTE(abhishekk): Once we support RBAC policies we can remove # ownership check from here. This is added here just to maintain # behavior with and without auth layer. self._check_visibility_and_ownership(req.context, image, ownership_check='create') member_repo = self._get_member_repo(req, image) # NOTE(abhishekk): This will verify whether the user has # permission to add a member or not. api_policy.MemberAPIPolicy( req.context, image, enforcer=self.policy).add_member() image_member_factory = self.gateway.get_image_member_factory( req.context) new_member = image_member_factory.new_image_member(image, member_id) member_repo.add(new_member) return new_member except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden: msg = _("Not allowed to create members for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) except exception.Duplicate: msg = _("Member %(member_id)s is duplicated for image " "%(image_id)s") % {"member_id": member_id, "image_id": image_id} LOG.warning(msg) raise webob.exc.HTTPConflict(explanation=msg) except exception.ImageMemberLimitExceeded as e: msg = (_("Image member limit exceeded for image %(id)s: %(e)s:") % {"id": image_id, "e": encodeutils.exception_to_unicode(e)}) LOG.warning(msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) @utils.mutating def update(self, req, image_id, member_id, status): """ Update the status of a member for a given image. :param req: the Request object coming from the wsgi layer :param image_id: the image identifier :param member_id: the member identifier :param status: the status of a member :returns: The response body is a mapping of the following form :: {'member_id': <MEMBER_ID>, 'image_id': <IMAGE_ID>, 'status': <MEMBER_STATUS>, 'created_at': .., 'updated_at': ..} """ try: image = self._lookup_image(req, image_id) # Check for image visibility and ownership before getting member # repo. # NOTE(abhishekk): Once we support RBAC policies we can remove # ownership check from here. This is added here just to maintain # behavior with and without auth layer. self._check_visibility_and_ownership(req.context, image, ownership_check='update') member_repo = self._get_member_repo(req, image) member = self._lookup_member(req, image, member_id, member_repo=member_repo) api_policy.MemberAPIPolicy( req.context, image, enforcer=self.policy).modify_member() member.status = status member_repo.save(member) return member except exception.Forbidden: msg = _("Not allowed to update members for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) except ValueError as e: msg = (_("Incorrect request: %s") % encodeutils.exception_to_unicode(e)) LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) def index(self, req, image_id): """ Return a list of dictionaries indicating the members of the image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer :param image_id: The image identifier :returns: The response body is a mapping of the following form :: {'members': [ {'member_id': <MEMBER_ID>, 'image_id': <IMAGE_ID>, 'status': <MEMBER_STATUS>, 'created_at': .., 'updated_at': ..}, .. ]} """ try: image = self._lookup_image(req, image_id) # Check for image visibility and ownership before getting member # repo. self._check_visibility_and_ownership(req.context, image) member_repo = self._get_member_repo(req, image) # NOTE(abhishekk): This will verify whether the user has # permission to view image members or not. Each member will be # checked with get_member policy below. api_policy_check = api_policy.MemberAPIPolicy( req.context, image, enforcer=self.policy) api_policy_check.get_members() except exception.Forbidden as e: msg = (_("Not allowed to list members for image %(image_id)s: " "%(inner_msg)s") % {"image_id": image.image_id, "inner_msg": e.msg}) LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) members = [ member for member in member_repo.list() if api_policy_check.check( 'get_member')] return dict(members=members) def show(self, req, image_id, member_id): """ Returns the membership of the tenant with respect to the specified image_id. :param req: the Request object coming from the wsgi layer :param image_id: The image identifier :returns: The response body is a mapping of the following form :: {'member_id': <MEMBER_ID>, 'image_id': <IMAGE_ID>, 'status': <MEMBER_STATUS>, 'created_at': .., 'updated_at': ..} """ try: image = self._lookup_image(req, image_id) # Check for image visibility and ownership before getting member # repo. self._check_visibility_and_ownership(req.context, image) return self._lookup_member(req, image, member_id) except exception.Forbidden as e: # Convert Forbidden to NotFound to prevent information # leakage. raise webob.exc.HTTPNotFound(explanation=e.msg) except webob.exc.HTTPForbidden as e: # Convert Forbidden to NotFound to prevent information # leakage. raise webob.exc.HTTPNotFound(explanation=e.explanation) @utils.mutating def delete(self, req, image_id, member_id): """ Removes a membership from the image. """ try: image = self._lookup_image(req, image_id) # Check for image visibility and ownership before getting member # repo. # NOTE(abhishekk): Once we support RBAC policies we can remove # ownership check from here. This is added here just to maintain # behavior with and without auth layer. self._check_visibility_and_ownership(req.context, image, ownership_check='delete') member_repo = self._get_member_repo(req, image) member = self._lookup_member(req, image, member_id, member_repo=member_repo) # NOTE(abhishekk): This will verify whether the user has # permission to delete the image member or not.
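            # NOTE(editor): Illustrative client call; host, port and token
            # handling are assumptions, not taken from this module:
            #
            #     import requests
            #
            #     requests.delete(
            #         'http://glance.example.com:9292/v2/images/'
            #         '%s/members/%s' % (image_id, member_id),
            #         headers={'X-Auth-Token': token})
            #
            # Success returns 204 No Content, per the webob.Response below.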
api_policy.MemberAPIPolicy( req.context, image, enforcer=self.policy).delete_member() member_repo.remove(member) return webob.Response(body='', status=http.NO_CONTENT) except exception.Forbidden: msg = _("Not allowed to delete members for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) class RequestDeserializer(wsgi.JSONRequestDeserializer): def __init__(self): super(RequestDeserializer, self).__init__() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] def create(self, request): body = self._get_request_body(request) try: member_id = body['member'] if not member_id: raise ValueError() except KeyError: msg = _("Member to be added not specified") raise webob.exc.HTTPBadRequest(explanation=msg) except ValueError: msg = _("Member can't be empty") raise webob.exc.HTTPBadRequest(explanation=msg) except TypeError: msg = _('Expected a member in the form: ' '{"member": "image_id"}') raise webob.exc.HTTPBadRequest(explanation=msg) return dict(member_id=member_id) def update(self, request): body = self._get_request_body(request) try: status = body['status'] except KeyError: msg = _("Status not specified") raise webob.exc.HTTPBadRequest(explanation=msg) except TypeError: msg = _('Expected a status in the form: ' '{"status": "status"}') raise webob.exc.HTTPBadRequest(explanation=msg) return dict(status=status) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() def _format_image_member(self, member): member_view = {} attributes = ['member_id', 'image_id', 'status'] for key in attributes: member_view[key] = getattr(member, key) member_view['created_at'] = timeutils.isotime(member.created_at) member_view['updated_at'] = timeutils.isotime(member.updated_at) member_view['schema'] = '/v2/schemas/member' member_view = self.schema.filter(member_view) return member_view def create(self, response, image_member): image_member_view = self._format_image_member(image_member) body = jsonutils.dumps(image_member_view, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def update(self, response, image_member): image_member_view = self._format_image_member(image_member) body = jsonutils.dumps(image_member_view, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def index(self, response, image_members): image_members = image_members['members'] image_members_view = [] for image_member in image_members: image_member_view = self._format_image_member(image_member) image_members_view.append(image_member_view) totalview = dict(members=image_members_view) totalview['schema'] = '/v2/schemas/members' body = jsonutils.dumps(totalview, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def show(self, response, image_member): image_member_view = self._format_image_member(image_member) body = jsonutils.dumps(image_member_view, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' _MEMBER_SCHEMA = { 'member_id': { 'type': 'string', 'description': _('An identifier for the image member (tenantId)') }, 'image_id': { 'type': 'string', 'description': _('An identifier for the image'), 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' 
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'created_at': { 'type': 'string', 'description': _('Date and time of image member creation'), # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the # format attribute, figure out why (and also fix in images.py) # 'format': 'date-time', }, 'updated_at': { 'type': 'string', 'description': _('Date and time of last modification of image member'), # 'format': 'date-time', }, 'status': { 'type': 'string', 'description': _('The status of this image member'), 'enum': [ 'pending', 'accepted', 'rejected' ] }, 'schema': { 'readOnly': True, 'type': 'string' } } def get_schema(): properties = copy.deepcopy(_MEMBER_SCHEMA) schema = glance.schema.Schema('member', properties) return schema def get_collection_schema(): member_schema = get_schema() return glance.schema.CollectionSchema('members', member_schema) def create_resource(): """Image Members resource factory method""" deserializer = RequestDeserializer() serializer = ResponseSerializer() controller = ImageMembersController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/image_tags.py0000664000175000017500000001113000000000000020055 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client as http import glance_store from oslo_log import log as logging from oslo_utils import encodeutils import webob.exc from glance.api import policy from glance.api.v2 import images as v2_api from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier LOG = logging.getLogger(__name__) class Controller(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @utils.mutating def update(self, req, image_id, tag_value): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) api_policy.ImageAPIPolicy(req.context, image, self.policy).modify_image() image.tags.add(tag_value) image_repo.save(image) except exception.NotFound: msg = _("Image %s not found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden: msg = _("Not allowed to update tags for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) except exception.Invalid as e: msg = (_("Could not update image: %s") % encodeutils.exception_to_unicode(e)) LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.ImageTagLimitExceeded as e: msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:") % {"id": image_id, "e": encodeutils.exception_to_unicode(e)}) LOG.warning(msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) @utils.mutating def delete(self, req, image_id, tag_value): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) api_policy.ImageAPIPolicy(req.context, image, self.policy).modify_image() if tag_value not in image.tags: raise webob.exc.HTTPNotFound() image.tags.remove(tag_value) image_repo.save(image) except exception.NotFound: msg = _("Image %s not found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden: msg = _("Not allowed to delete tags for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) class ResponseSerializer(wsgi.JSONResponseSerializer): def update(self, response, result): response.status_int = http.NO_CONTENT def delete(self, response, result): response.status_int = http.NO_CONTENT class RequestDeserializer(wsgi.JSONRequestDeserializer): def update(self, request): try: schema = v2_api.get_schema() schema_format = {"tags": [request.urlvars.get('tag_value')]} schema.validate(schema_format) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) return super(RequestDeserializer, self).default(request) def create_resource(): """Images resource factory method""" serializer = ResponseSerializer() deserializer = RequestDeserializer() controller = Controller() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/images.py0000664000175000017500000026547000000000000017244 0ustar00zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import hashlib import http.client as http import os import re import urllib.parse as urlparse import uuid from castellan.common import exception as castellan_exception from castellan import key_manager import glance_store from glance_store import location from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_utils import encodeutils from oslo_utils import timeutils as oslo_timeutils import requests import webob.exc from glance.api import common from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import store_utils from glance.common import timeutils from glance.common import utils from glance.common import wsgi from glance import context as glance_context import glance.db import glance.gateway from glance.i18n import _, _LE, _LI, _LW import glance.notifier from glance.quota import keystone as ks_quota import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') CONF.import_opt('container_formats', 'glance.common.config', group='image_format') CONF.import_opt('show_multiple_locations', 'glance.common.config') CONF.import_opt('hashing_algorithm', 'glance.common.config') def proxy_response_error(orig_code, orig_explanation): """Construct a webob.exc.HTTPError exception on the fly. The webob.exc.HTTPError classes are statically defined, intended to be straight subclasses of HTTPError, specifically with *class* level definitions of things we need to be dynamic. This method returns an exception class instance with those values set programmatically so we can raise it to mimic the response we got from a remote. 
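    Example (illustrative): a remote 409 reply can be replayed locally as

        raise proxy_response_error(409, 'Conflict')

    which raises an HTTPError subclass instance whose code is 409 and
    whose title is 'Conflict'.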
""" class ProxiedResponse(webob.exc.HTTPError): code = orig_code title = orig_explanation return ProxiedResponse() class ImagesController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) self._key_manager = key_manager.API(CONF) @utils.mutating def create(self, req, image, extra_properties, tags): image_factory = self.gateway.get_image_factory(req.context) image_repo = self.gateway.get_repo(req.context) try: if 'owner' not in image: image['owner'] = req.context.project_id api_policy.ImageAPIPolicy(req.context, image, self.policy).add_image() ks_quota.enforce_image_count_total(req.context, req.context.owner) image = image_factory.new_image(extra_properties=extra_properties, tags=tags, **image) image_repo.add(image) except (exception.DuplicateLocation, exception.Invalid) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except (exception.ReservedProperty, exception.ReadonlyProperty) as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to create image") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.LimitExceeded as e: LOG.warning(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPRequestEntityTooLarge( explanation=e.msg, request=req, content_type='text/plain') except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) except TypeError as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e) return image def _bust_import_lock(self, admin_image_repo, admin_task_repo, image, task, task_id): if task: # FIXME(danms): It would be good if we had a 'canceled' or # 'aborted' status here. try: task.fail('Expired lock preempted') admin_task_repo.save(task) except exception.InvalidTaskStatusTransition: # NOTE(danms): This may happen if we try to fail a # task that is in a terminal state, but where the lock # was never dropped from the image. We will log the # image, task, and status below so we can just ignore # here. pass try: admin_image_repo.delete_property_atomic( image, 'os_glance_import_task', task_id) except exception.NotFound: LOG.warning('Image %(image)s has stale import task %(task)s ' 'but we lost the race to remove it.', {'image': image.image_id, 'task': task_id}) # We probably lost the race to expire the old lock, but # act like it is not yet expired to avoid a retry loop. 
raise exception.Conflict('Image has active task') LOG.warning('Image %(image)s has stale import task %(task)s ' 'in status %(status)s from %(owner)s; removed lock ' 'because it had expired.', {'image': image.image_id, 'task': task_id, 'status': task and task.status or 'missing', 'owner': task and task.owner or 'unknown owner'}) def _enforce_import_lock(self, req, image): admin_context = req.context.elevated() admin_image_repo = self.gateway.get_repo(admin_context) admin_task_repo = self.gateway.get_task_repo(admin_context) other_task = image.extra_properties['os_glance_import_task'] expiry = datetime.timedelta(minutes=60) bustable_states = ('pending', 'processing', 'success', 'failure') try: task = admin_task_repo.get(other_task) except exception.NotFound: # NOTE(danms): This could happen if we failed to do an import # a long time ago, and the task record has since been culled from # the database, but the task id is still in the lock field. LOG.warning('Image %(image)s has non-existent import ' 'task %(task)s; considering it stale', {'image': image.image_id, 'task': other_task}) task = None age = 0 else: age = oslo_timeutils.utcnow() - task.updated_at if task.status == 'pending': # NOTE(danms): Tasks in pending state could be queued, # blocked or otherwise right-about-to-get-going, so we # double the expiry time for safety. We will report # time remaining below, so this is not too obscure. expiry *= 2 if not task or (task.status in bustable_states and age >= expiry): self._bust_import_lock(admin_image_repo, admin_task_repo, image, task, other_task) return task if task.status in bustable_states: LOG.warning('Image %(image)s has active import task %(task)s in ' 'status %(status)s; lock remains valid for %(expire)i ' 'more seconds', {'image': image.image_id, 'task': task.task_id, 'status': task.status, 'expire': (expiry - age).total_seconds()}) else: LOG.debug('Image %(image)s has import task %(task)s in status ' '%(status)s and does not qualify for expiry.', {'image': image.image_id, 'task': task.task_id, 'status': task.status}) raise exception.Conflict('Image has active task') def _cleanup_stale_task_progress(self, image_repo, image, task): """Cleanup stale in-progress information from a previous task. If we stole the lock from another task, we should try to clean up the in-progress status information from that task while we have the lock. """ stores = task.task_input.get('backend', []) keys = ['os_glance_importing_to_stores', 'os_glance_failed_import'] changed = set() for store in stores: for key in keys: values = image.extra_properties.get(key, '').split(',') if store in values: values.remove(store) changed.add(key) image.extra_properties[key] = ','.join(values) if changed: image_repo.save(image) LOG.debug('Image %(image)s had stale import progress info ' '%(keys)s from task %(task)s which was cleaned up', {'image': image.image_id, 'task': task.task_id, 'keys': ','.join(changed)}) def _proxy_request_to_stage_host(self, image, req, body=None): """Proxy a request to a staging host. When an image was staged on another worker, that worker may record its worker_self_reference_url on the image, indicating that other workers should proxy requests to it while the image is staged. This method replays our current request against the remote host, returns the result, and performs any response error translation required. The remote request-id is used to replace the one on req.context so that a client sees the proper id used for the actual action. 
        :param image: The Image from the repo
        :param req: The webob.Request from the current request
        :param body: The request body or None
        :returns: The result from the remote host
        :raises: webob.exc.HTTPClientError matching the remote's error, or
                 webob.exc.HTTPServerError if we were unable to contact the
                 remote host.
        """
        stage_host = image.extra_properties['os_glance_stage_host']
        LOG.info(_LI('Proxying %s request to host %s '
                     'which has image staged'), req.method, stage_host)
        client = glance_context.get_ksa_client(req.context)
        url = '%s%s' % (stage_host, req.path)
        req_id_hdr = 'x-openstack-request-id'
        request_method = getattr(client, req.method.lower())
        try:
            r = request_method(url, json=body, timeout=60)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.ConnectTimeout) as e:
            LOG.error(_LE('Failed to proxy to %r: %s'), url, e)
            raise webob.exc.HTTPGatewayTimeout('Stage host is unavailable')
        except requests.exceptions.RequestException as e:
            LOG.error(_LE('Failed to proxy to %r: %s'), url, e)
            raise webob.exc.HTTPBadGateway('Stage host is unavailable')
        if req_id_hdr in r.headers:
            LOG.debug('Replying with remote request id %s',
                      r.headers[req_id_hdr])
            req.context.request_id = r.headers[req_id_hdr]
        if r.status_code // 100 != 2:
            raise proxy_response_error(r.status_code, r.reason)
        return image.image_id

    @property
    def self_url(self):
        """Return the URL we expect to point to us.

        If this is set to a per-worker URL in worker_self_reference_url,
        that takes precedence. Otherwise we fall back to public_endpoint.
        """
        return CONF.worker_self_reference_url or CONF.public_endpoint

    def is_proxyable(self, image):
        """Decide if an action is proxyable to a stage host.

        If the image has a staging host recorded with a URL that does not
        match ours, then we can proxy our request to that host.
:param image: The Image from the repo :returns: bool indicating proxyable status """ return ( 'os_glance_stage_host' in image.extra_properties and image.extra_properties['os_glance_stage_host'] != self.self_url) @utils.mutating def import_image(self, req, image_id, body): ctxt = req.context image_repo = self.gateway.get_repo(ctxt) task_factory = self.gateway.get_task_factory(ctxt) task_repo = self.gateway.get_task_repo(ctxt) import_method = body.get('method').get('name') uri = body.get('method').get('uri') all_stores_must_succeed = body.get('all_stores_must_succeed', True) stole_lock_from_task = None try: ks_quota.enforce_image_size_total(req.context, req.context.owner) except exception.LimitExceeded as e: raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e), request=req) try: image = image_repo.get(image_id) if image.status == 'active' and import_method != "copy-image": msg = _("Image with status active cannot be target for import") raise exception.Conflict(msg) if image.status != 'active' and import_method == "copy-image": msg = _("Only images with status active can be targeted for " "copying") raise exception.Conflict(msg) if (image.status != 'queued' and import_method in ['web-download', 'glance-download']): msg = _("Image needs to be in 'queued' state to use " "'%s' method") % import_method raise exception.Conflict(msg) if (image.status != 'uploading' and import_method == 'glance-direct'): msg = _("Image needs to be staged before 'glance-direct' " "method can be used") raise exception.Conflict(msg) if not getattr(image, 'container_format', None): msg = _("'container_format' needs to be set before import") raise exception.Conflict(msg) if not getattr(image, 'disk_format', None): msg = _("'disk_format' needs to be set before import") raise exception.Conflict(msg) if import_method == 'glance-download': if 'glance_region' not in body.get('method'): msg = _("'glance_region' needs to be set for " "glance-download import method") raise webob.exc.HTTPBadRequest(explanation=msg) if 'glance_image_id' not in body.get('method'): msg = _("'glance_image_id' needs to be set for " "glance-download import method") raise webob.exc.HTTPBadRequest(explanation=msg) try: uuid.UUID(body['method']['glance_image_id']) except ValueError: msg = (_("Remote image id does not look like a UUID: %s") % body['method']['glance_image_id']) raise webob.exc.HTTPBadRequest(explanation=msg) if 'glance_service_interface' not in body.get('method'): body.get('method')['glance_service_interface'] = 'public' # NOTE(danms): For copy-image only, we check policy to decide # if the user should be able to do this. Otherwise, we forbid # the import if the user is not the owner. api_pol = api_policy.ImageAPIPolicy(req.context, image, enforcer=self.policy) if import_method == 'copy-image': api_pol.copy_image() else: # NOTE(abhishekk): We need to perform ownership check on image # so that non-admin or non-owner can not import data to image api_pol.modify_image() if 'os_glance_import_task' in image.extra_properties: # NOTE(danms): This will raise exception.Conflict if the # lock is present and valid, or return if absent or invalid. stole_lock_from_task = self._enforce_import_lock(req, image) stores = [None] if CONF.enabled_backends: try: stores = utils.get_stores_from_request(req, body) except glance_store.UnknownScheme as exc: LOG.warning(exc.msg) raise exception.Conflict(exc.msg) # NOTE(abhishekk): If all_stores is specified and import_method is # copy_image, then remove those stores where image is already # present. 
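            # For illustration, a copy-image request body (hypothetical
            # store names) that exercises the two branches below might
            # look like:
            #
            #     {"method": {"name": "copy-image"},
            #      "stores": ["store-a", "store-b"],
            #      "all_stores": false}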
all_stores = body.get('all_stores', False) if import_method == 'copy-image' and all_stores: for loc in image.locations: existing_store = loc['metadata']['store'] if existing_store in stores: LOG.debug("Removing store '%s' from all stores as " "image is already available in that " "store.", existing_store) stores.remove(existing_store) if len(stores) == 0: LOG.info(_LI("Exiting copying workflow as image is " "available in all configured stores.")) return image_id # validate if image is already existing in given stores when # all_stores is False if import_method == 'copy-image' and not all_stores: for loc in image.locations: existing_store = loc['metadata']['store'] if existing_store in stores: msg = _("Image is already present at store " "'%s'") % existing_store raise webob.exc.HTTPBadRequest(explanation=msg) except exception.Conflict as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) if (not all_stores_must_succeed) and (not CONF.enabled_backends): msg = (_("All_stores_must_succeed can only be set with " "enabled_backends %s") % uri) raise webob.exc.HTTPBadRequest(explanation=msg) if self.is_proxyable(image) and import_method == 'glance-direct': # NOTE(danms): Image is staged on another worker; proxy the # import request to that worker with the user's token, as if # they had called it themselves. return self._proxy_request_to_stage_host(image, req, body) task_input = {'image_id': image_id, 'import_req': body, 'backend': stores} if import_method == 'copy-image': # If this is a copy-image import and we passed the policy check, # grab an admin context for the task so it can manipulate metadata # as admin. admin_context = ctxt.elevated() else: admin_context = None executor_factory = self.gateway.get_task_executor_factory( ctxt, admin_context=admin_context) if (import_method == 'web-download' and not utils.validate_import_uri(uri)): LOG.debug("URI for web-download does not pass filtering: %s", uri) msg = (_("URI for web-download does not pass filtering: %s") % uri) raise webob.exc.HTTPBadRequest(explanation=msg) try: import_task = task_factory.new_task(task_type='api_image_import', owner=ctxt.owner, task_input=task_input, image_id=image_id, user_id=ctxt.user_id, request_id=ctxt.request_id) # NOTE(danms): Try to grab the lock for this task try: image_repo.set_property_atomic(image, 'os_glance_import_task', import_task.task_id) except exception.Duplicate: msg = (_("New operation on image '%s' is not permitted as " "prior operation is still in progress") % image_id) raise exception.Conflict(msg) # NOTE(danms): We now have the import lock on this image. If we # busted the lock above and have a reference to that task, try # to clean up the import status information left over from that # execution. 
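            # A rough sketch of the lock lifecycle (the final step happens
            # in the async task flow, not in this module):
            #   1. set_property_atomic() stamps os_glance_import_task with
            #      this task's id; Duplicate means another operation
            #      already holds the lock.
            #   2. _enforce_import_lock() lets a later request bust the
            #      lock once the owning task has gone stale (see
            #      bustable_states and the expiry handling above).
            #   3. The import flow is expected to drop the property again
            #      when the task completes.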
if stole_lock_from_task: self._cleanup_stale_task_progress(image_repo, image, stole_lock_from_task) task_repo.add(import_task) task_executor = executor_factory.new_task_executor(ctxt) pool = common.get_thread_pool("tasks_pool") pool.spawn(import_task.run, task_executor) except exception.Forbidden as e: LOG.debug("User not permitted to create image import task.") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.Conflict as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.LimitExceeded as e: raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e), request=req) except ValueError as e: LOG.debug("Cannot import data for image %(id)s: %(e)s", {'id': image_id, 'e': encodeutils.exception_to_unicode(e)}) raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) return image_id def index(self, req, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None, member_status='accepted'): sort_key = ['created_at'] if not sort_key else sort_key sort_dir = ['desc'] if not sort_dir else sort_dir result = {} if filters is None: filters = {} filters['deleted'] = False os_hidden = filters.get('os_hidden', 'false').lower() if os_hidden not in ['true', 'false']: message = _("Invalid value '%s' for 'os_hidden' filter." " Valid values are 'true' or 'false'.") % os_hidden raise webob.exc.HTTPBadRequest(explanation=message) # ensure the type of os_hidden is boolean filters['os_hidden'] = os_hidden == 'true' protected = filters.get('protected') if protected is not None: if protected not in ['true', 'false']: message = _("Invalid value '%s' for 'protected' filter." " Valid values are 'true' or 'false'.") % protected raise webob.exc.HTTPBadRequest(explanation=message) # ensure the type of protected is boolean filters['protected'] = protected == 'true' if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) image_repo = self.gateway.get_repo(req.context) try: # NOTE(danms): This is just a "do you have permission to # list images" check. Each image is checked against # get_image below. target = {'project_id': req.context.project_id} self.policy.enforce(req.context, 'get_images', target) images = image_repo.list(marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, member_status=member_status) db_image_count = len(images) images = [image for image in images if api_policy.ImageAPIPolicy(req.context, image, self.policy ).check('get_image')] # NOTE(danms): we need to include the next marker if the DB # paginated. Since we filter images based on policy, we can # not determine if pagination happened from the final list, # so use the original count. 
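            # Example with hypothetical numbers: for limit=20, if the DB
            # returned 20 rows and policy filtering hid 3 of them, we
            # still set next_marker from the last visible image so the
            # client can page past this window; omitting it just because
            # only 17 images survived filtering would falsely signal the
            # end of the list.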
if len(images) != 0 and db_image_count == limit: result['next_marker'] = images[-1].image_id except (exception.NotFound, exception.InvalidSortKey, exception.InvalidFilterRangeValue, exception.InvalidParameterValue, exception.InvalidFilterOperatorValue) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to retrieve images index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) result['images'] = images return result def show(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) api_policy.ImageAPIPolicy(req.context, image, self.policy).get_image() return image except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) def get_task_info(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: # NOTE (abhishekk): Just to check image is valid image = image_repo.get(image_id) # Check you are authorized to fetch image details api_policy.ImageAPIPolicy(req.context, image, self.policy).get_image() except (exception.NotFound, exception.Forbidden): raise webob.exc.HTTPNotFound() tasks = self.db_api.tasks_get_by_image(req.context, image.image_id) return {"tasks": tasks} @utils.mutating def update(self, req, image_id, changes): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) for change in changes: change_method_name = '_do_%s' % change['op'] change_method = getattr(self, change_method_name) change_method(req, image, api_pol, change) if changes: image_repo.save(image) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except (exception.Invalid, exception.BadStoreUri) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to update image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.StorageQuotaFull as e: msg = (_("Denying attempt to upload image because it exceeds the" " quota: %s") % encodeutils.exception_to_unicode(e)) LOG.warning(msg) raise webob.exc.HTTPRequestEntityTooLarge( explanation=msg, request=req, content_type='text/plain') except exception.LimitExceeded as e: LOG.exception(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPRequestEntityTooLarge( explanation=e.msg, request=req, content_type='text/plain') except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) return image def _do_replace(self, req, image, api_pol, change): path = change['path'] path_root = path[0] value = change['value'] if path_root == 'locations' and not value: msg = _("Cannot set locations to empty list.") raise webob.exc.HTTPForbidden(msg) elif path_root == 'locations' and value: api_pol.update_locations() self._do_replace_locations(image, value) elif path_root == 'owner' and req.context.is_admin == False: msg = _("Owner can't be updated by non admin.") raise webob.exc.HTTPForbidden(msg) else: api_pol.update_property(path_root, value) if hasattr(image, path_root): setattr(image, path_root, value) elif path_root in image.extra_properties: image.extra_properties[path_root] = value else: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % path_root) def _do_add(self, req, image, api_pol, 
change): path = change['path'] path_root = path[0] value = change['value'] json_schema_version = change.get('json_schema_version', 10) if path_root == 'locations': api_pol.update_locations() self._do_add_locations(image, path[1], value, req.context) else: api_pol.update_property(path_root, value) if ((hasattr(image, path_root) or path_root in image.extra_properties) and json_schema_version == 4): msg = _("Property %s already present.") raise webob.exc.HTTPConflict(msg % path_root) if hasattr(image, path_root): setattr(image, path_root, value) else: image.extra_properties[path_root] = value def _do_remove(self, req, image, api_pol, change): path = change['path'] path_root = path[0] if path_root == 'locations': api_pol.delete_locations() try: self._do_remove_locations(image, path[1]) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(e.msg) else: api_pol.update_property(path_root) if hasattr(image, path_root): msg = _("Property %s may not be removed.") raise webob.exc.HTTPForbidden(msg % path_root) elif path_root in image.extra_properties: del image.extra_properties[path_root] else: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % path_root) def _delete_encryption_key(self, context, image): props = image.extra_properties cinder_encryption_key_id = props.get('cinder_encryption_key_id') if cinder_encryption_key_id is None: return deletion_policy = props.get('cinder_encryption_key_deletion_policy', '') if deletion_policy != 'on_image_deletion': return try: self._key_manager.delete(context, cinder_encryption_key_id) except castellan_exception.Forbidden: msg = ('Not allowed to delete encryption key %s' % cinder_encryption_key_id) LOG.warning(msg) except (castellan_exception.ManagedObjectNotFoundError, KeyError): msg = 'Could not find encryption key %s' % cinder_encryption_key_id LOG.warning(msg) except castellan_exception.KeyManagerError: msg = ('Failed to delete cinder encryption key %s' % cinder_encryption_key_id) LOG.warning(msg) @utils.mutating def delete_from_store(self, req, store_id, image_id): if not CONF.enabled_backends: raise webob.exc.HTTPNotFound() if store_id not in CONF.enabled_backends: msg = (_("The selected store %s is not available on this node.") % store_id) raise webob.exc.HTTPConflict(explanation=msg) image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) except exception.NotFound: msg = (_("Failed to find image %(image_id)s") % {'image_id': image_id}) raise webob.exc.HTTPNotFound(explanation=msg) # NOTE(abhishekk): Delete from store internally checks for # get_image_location and delete_image_location policies using # ImageLocationProxy object, so this is the right place to # check those policies api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.get_image_location() # This policy will check for legacy image ownership as well try: api_pol.delete_locations() except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) if image.status != 'active': msg = _("It's not allowed to remove image data from store if " "image status is not 'active'") raise webob.exc.HTTPConflict(explanation=msg) if len(image.locations) == 1: LOG.debug("User forbidden to remove last location of image %s", image_id) msg = _("Cannot delete image data from the only store containing " "it. 
Consider deleting the image instead.") raise webob.exc.HTTPForbidden(explanation=msg) try: # NOTE(jokke): Here we go through the locations list and act on # the first hit. image.locations.pop() will actually remove the # data from the backend as well as remove the location object # from the list. for pos, loc in enumerate(image.locations): if loc['metadata'].get('store') == store_id: image.locations.pop(pos) break else: msg = (_("Image %(iid)s is not stored in store %(sid)s.") % {'iid': image_id, 'sid': store_id}) raise exception.Invalid(msg) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.Invalid as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except glance_store.exceptions.HasSnapshot as e: raise webob.exc.HTTPConflict(explanation=e.msg) except glance_store.exceptions.InUseByStore as e: msg = ("The data for Image %(id)s could not be deleted " "because it is in use: %(exc)s" % {"id": image_id, "exc": e.msg}) LOG.warning(msg) raise webob.exc.HTTPConflict(explanation=msg) except Exception as e: raise webob.exc.HTTPInternalServerError( explanation=encodeutils.exception_to_unicode(e)) image_repo.save(image) def _delete_image_on_remote(self, image, req): """Proxy an image delete to a staging host. When an image is staged and then deleted, the staging host still has local residue that needs to be cleaned up. If the request to delete arrived here, but we are not the stage host, we need to proxy it to the appropriate host. If the delete succeeds, we return None (per DELETE semantics), indicating to the caller that it was handled. If the delete fails on the remote end, we allow the HTTPClientError to bubble to our caller, which will return the error to the client. If we fail to contact the remote server, we catch the HTTPServerError raised by our proxy method, verify that the image still exists, and return it. That indicates to the caller that it should proceed with the regular delete logic, which will satisfy the client's request, but leave the residue on the stage host (which is unavoidable). :param image: The Image from the repo :param req: The webob.Request for this call :returns: None if successful, or a refreshed image if the proxy failed. :raises: webob.exc.HTTPClientError if so raised by the remote server. """ try: self._proxy_request_to_stage_host(image, req) except webob.exc.HTTPServerError: # This means we would have raised a 50x error, indicating # we did not succeed with the request to the remote host. # In this case, refresh the image from the repo, and if it # is not deleted, allow the regular delete process to # continue on the local worker to match the user's # expectations. If the image is already deleted, the caller # will catch this NotFound like normal. return self.gateway.get_repo(req.context).get(image.image_id) @utils.mutating def delete(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) # NOTE(abhishekk): This is the right place to check whether user # have permission to delete the image and remove the policy check # later from the policy layer. api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.delete_image() if self.is_proxyable(image): # NOTE(danms): Image is staged on another worker; proxy the # delete request to that worker with the user's token, as if # they had called it themselves. image = self._delete_image_on_remote(image, req) if image is None: # Delete was proxied, so we are done here. 
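                    # (A successful proxied delete also makes the staging
                    # residue cleanup below the remote worker's
                    # responsibility, not ours.)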
return # NOTE(abhishekk): Delete the data from staging area if CONF.enabled_backends: separator, staging_dir = store_utils.get_dir_separator() file_path = "%s%s%s" % (staging_dir, separator, image_id) try: fn_call = glance_store.get_store_from_store_identifier staging_store = fn_call('os_glance_staging_store') loc = location.get_location_from_uri_and_backend( file_path, 'os_glance_staging_store') staging_store.delete(loc) except (glance_store.exceptions.NotFound, glance_store.exceptions.UnknownScheme): pass else: file_path = str( CONF.node_staging_uri + '/' + image_id)[7:] if os.path.exists(file_path): try: LOG.debug( "After upload to the backend, deleting staged " "image data from %(fn)s", {'fn': file_path}) os.unlink(file_path) except OSError as e: LOG.error( "After upload to backend, deletion of staged " "image data from %(fn)s has failed because " "[Errno %(en)d]", {'fn': file_path, 'en': e.errno}) else: LOG.warning(_( "After upload to backend, deletion of staged " "image data has failed because " "it cannot be found at %(fn)s"), {'fn': file_path}) image.delete() self._delete_encryption_key(req.context, image) image_repo.remove(image) except (glance_store.Forbidden, exception.Forbidden) as e: LOG.debug("User not permitted to delete image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except (glance_store.NotFound, exception.NotFound): msg = (_("Failed to find image %(image_id)s to delete") % {'image_id': image_id}) LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except glance_store.exceptions.InUseByStore as e: msg = (_("Image %(id)s could not be deleted " "because it is in use: %(exc)s") % {"id": image_id, "exc": e.msg}) LOG.warning(msg) raise webob.exc.HTTPConflict(explanation=msg) except glance_store.exceptions.HasSnapshot as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) def _validate_hashing_data(self, val_data): if 'os_hash_value' in val_data: try: hashval = bytearray.fromhex(val_data['os_hash_value']) except ValueError: msg = (_("os_hash_value (%s) is not a valid hexadecimal" " value") % (val_data['os_hash_value'])) raise webob.exc.HTTPBadRequest(explanation=msg) hash_algo = val_data.get('os_hash_algo', CONF['hashing_algorithm']) want_size = hashlib.new(hash_algo).digest_size if len(hashval) != want_size: msg = (_("os_hash_value: (%(value)s) is not the correct " "size for (%(algo)s) " "(should be (%(want)d) bytes)") % {'value': val_data['os_hash_value'], 'algo': hash_algo, 'want': want_size}) raise webob.exc.HTTPBadRequest(explanation=msg) def _validate_validation_data(self, image, locations): val_data = {} for loc in locations: if 'validation_data' not in loc: continue for k, v in loc['validation_data'].items(): if val_data.get(k, v) != v: msg = _("Conflicting values for %s") % k raise webob.exc.HTTPConflict(explanation=msg) val_data[k] = v # NOTE(imacdonn): values may be provided for items which are # already set, so long as the values exactly match. In this # case, nothing actually needs to be updated, but we should # reject the request if there's an apparent attempt to supply # a different value. 
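        # For example (hypothetical values): if the image already has
        # os_hash_algo='sha512', a location supplying
        # validation_data={'os_hash_algo': 'sha512', ...} passes through
        # as a no-op, while supplying 'sha256' instead is rejected below
        # with a 409.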
        new_val_data = {}
        for k, v in val_data.items():
            current = getattr(image, k)
            if v == current:
                continue
            if current:
                msg = _("%s is already set with a different value") % k
                raise webob.exc.HTTPConflict(explanation=msg)
            new_val_data[k] = v

        if not new_val_data:
            return {}

        if image.status != 'queued':
            msg = _("New value(s) for %s may only be provided when image "
                    "status is 'queued'") % ', '.join(new_val_data.keys())
            raise webob.exc.HTTPConflict(explanation=msg)

        if 'checksum' in new_val_data:
            try:
                checksum_bytes = bytearray.fromhex(new_val_data['checksum'])
            except ValueError:
                msg = (_("checksum (%s) is not a valid hexadecimal value") %
                       new_val_data['checksum'])
                raise webob.exc.HTTPConflict(explanation=msg)
            if len(checksum_bytes) != 16:
                msg = (_("checksum (%s) is not the correct size for md5 "
                         "(should be 16 bytes)") %
                       new_val_data['checksum'])
                raise webob.exc.HTTPConflict(explanation=msg)

        hash_algo = new_val_data.get('os_hash_algo')
        if hash_algo != CONF['hashing_algorithm']:
            msg = (_("os_hash_algo must be %(want)s, not %(got)s") %
                   {'want': CONF['hashing_algorithm'], 'got': hash_algo})
            raise webob.exc.HTTPConflict(explanation=msg)

        try:
            hash_bytes = bytearray.fromhex(new_val_data['os_hash_value'])
        except ValueError:
            msg = (_("os_hash_value (%s) is not a valid hexadecimal value") %
                   new_val_data['os_hash_value'])
            raise webob.exc.HTTPConflict(explanation=msg)
        want_size = hashlib.new(hash_algo).digest_size
        if len(hash_bytes) != want_size:
            msg = (_("os_hash_value (%(value)s) is not the correct size for "
                     "%(algo)s (should be %(want)d bytes)") %
                   {'value': new_val_data['os_hash_value'],
                    'algo': hash_algo, 'want': want_size})
            raise webob.exc.HTTPConflict(explanation=msg)

        return new_val_data

    def _get_locations_op_pos(self, path_pos, max_pos, allow_max):
        if path_pos is None or max_pos is None:
            return None
        pos = max_pos if allow_max else max_pos - 1
        if path_pos.isdigit():
            pos = int(path_pos)
        elif path_pos != '-':
            return None
        if not (allow_max or 0 <= pos < max_pos):
            return None
        return pos

    def _do_replace_locations(self, image, value):
        if not CONF.show_multiple_locations:
            msg = _("It's not allowed to update locations if locations are "
                    "invisible.")
            raise webob.exc.HTTPForbidden(explanation=msg)

        if image.status not in ('active', 'queued'):
            msg = _("It's not allowed to replace locations if image status is "
                    "%s.") % image.status
            raise webob.exc.HTTPConflict(explanation=msg)

        val_data = self._validate_validation_data(image, value)
        # NOTE(abhishekk): get glance store based on location uri
        updated_location = value
        if CONF.enabled_backends:
            updated_location = store_utils.get_updated_store_location(
                value)

        try:
            # NOTE(flwang): _locations_proxy's setattr method will check if
            # the update is acceptable.
            image.locations = updated_location
            if image.status == 'queued':
                for k, v in val_data.items():
                    setattr(image, k, v)
                image.status = 'active'
        except (exception.BadStoreUri, exception.DuplicateLocation) as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except ValueError as ve:
            # update image status failed.
            raise webob.exc.HTTPBadRequest(
                explanation=encodeutils.exception_to_unicode(ve))

    def _do_add_locations(self, image, path_pos, value, context):
        if not CONF.show_multiple_locations:
            msg = _("It's not allowed to add locations if locations are "
                    "invisible.")
            raise webob.exc.HTTPForbidden(explanation=msg)

        if image.status not in ('active', 'queued'):
            msg = _("It's not allowed to add locations if image status is "
                    "%s.") % image.status
            raise webob.exc.HTTPConflict(explanation=msg)

        val_data = self._validate_validation_data(image, [value])
        # NOTE(abhishekk): get glance store based on location uri
        updated_location = value
        if CONF.enabled_backends:
            updated_location = store_utils.get_updated_store_location(
                [value], context=context)[0]

        pos = self._get_locations_op_pos(path_pos, len(image.locations), True)
        if pos is None:
            msg = _("Invalid position for adding a location.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            image.locations.insert(pos, updated_location)
            if image.status == 'queued':
                for k, v in val_data.items():
                    setattr(image, k, v)
                image.status = 'active'
        except (exception.BadStoreUri, exception.DuplicateLocation) as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except ValueError as e:
            # update image status failed.
            raise webob.exc.HTTPBadRequest(
                explanation=encodeutils.exception_to_unicode(e))

    def _do_remove_locations(self, image, path_pos):
        if not CONF.show_multiple_locations:
            msg = _("It's not allowed to remove locations if locations are "
                    "invisible.")
            raise webob.exc.HTTPForbidden(explanation=msg)

        if image.status not in ('active',):
            msg = _("It's not allowed to remove locations if image status is "
                    "%s.") % image.status
            raise webob.exc.HTTPConflict(explanation=msg)

        if len(image.locations) == 1:
            LOG.debug("User forbidden to remove last location of image %s",
                      image.image_id)
            msg = _("Cannot remove last location in the image.")
            raise exception.Forbidden(msg)
        pos = self._get_locations_op_pos(path_pos, len(image.locations), False)
        if pos is None:
            msg = _("Invalid position for removing a location.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        try:
            # NOTE(zhiyan): this actually deletes the location
            # from the backend store.
            image.locations.pop(pos)
            # TODO(jokke): Fix this, we should catch what the store throws
            # and provide the user something more specific than
            # InternalServerError.
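            # Until then, anything the store driver raises here (from a
            # backend timeout to a permissions failure) surfaces to the
            # API caller as a 500.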
except Exception as e: raise webob.exc.HTTPInternalServerError( explanation=encodeutils.exception_to_unicode(e)) def add_location(self, req, image_id, body): url = body.get('url') validation_data = body.get('validation_data', {}) image_repo = self.gateway.get_repo(req.context) ctxt = req.context stole_lock_from_task = None task_factory = self.gateway.get_task_factory(ctxt) task_repo = self.gateway.get_task_repo(ctxt) try: image = image_repo.get(image_id) if image.status != 'queued': msg = _("It's not allowed to add locations if image status is " "%s.") % image.status raise webob.exc.HTTPConflict(explanation=msg) api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.add_location() roles = list(set(req.context.roles + req.context.service_roles)) if 'service' not in roles: # NOTE(pdeore): Add location API is disabled for other stores # than http if not utils.is_http_store_configured(url): msg = _("http store must be enabled to use location API" " by normal user.") raise exception.Forbidden(msg) if validation_data is not None: self._validate_hashing_data(validation_data) if 'os_glance_import_task' in image.extra_properties: # NOTE(pdeore): This will raise exception.Conflict if the # lock is present and valid, or return if absent or invalid. stole_lock_from_task = self._enforce_import_lock(req, image) task_input = {'image_id': image_id, 'loc_url': url, 'validation_data': validation_data} executor_factory = self.gateway.get_task_executor_factory( ctxt) add_location_task = task_factory.new_task( task_type='location_import', owner=ctxt.owner, task_input=task_input, image_id=image_id, user_id=ctxt.user_id, request_id=ctxt.request_id) try: # NOTE(pdeore): Try to grab the lock for this task image_repo.set_property_atomic(image, 'os_glance_import_task', add_location_task.task_id) except exception.Duplicate: msg = (_("New operation on image '%s' is not " "permitted as prior operation is still " "in progress") % image_id) raise exception.Conflict(msg) # NOTE(pdeore): We now have the import lock on this image. # If we busted the lock above and have a reference to that # task, try to clean up the import status information left # over from that execution. 
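            # Note that add_location takes the same os_glance_import_task
            # lock as import_image above, so a location add and an image
            # import cannot race on the same image.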
if stole_lock_from_task: self._cleanup_stale_task_progress(image_repo, image, stole_lock_from_task) task_repo.add(add_location_task) task_executor = executor_factory.new_task_executor(ctxt) pool = common.get_thread_pool("tasks_pool") pool.spawn(add_location_task.run, task_executor) except exception.Conflict as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to add location to image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) except ValueError as e: raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) return image_id def get_locations(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) # NOTE(pdeore): This is the right place to check whether user # have permission to get the image locations api_pol = api_policy.ImageAPIPolicy(req.context, image, self.policy) api_pol.get_locations() locations = list(image.locations) for loc in locations: loc.pop('id', None) loc.pop('status', None) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to get the image locations.") raise webob.exc.HTTPForbidden(explanation=e.msg) return locations class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ('direct_url', 'self', 'file', 'schema', 'stores') _readonly_properties = ('created_at', 'updated_at', 'status', 'checksum', 'size', 'virtual_size', 'direct_url', 'self', 'file', 'schema', 'id', 'os_hash_algo', 'os_hash_value') _reserved_properties = ('location', 'deleted', 'deleted_at') _reserved_namespaces = (common.GLANCE_RESERVED_NS,) _base_properties = ('checksum', 'created_at', 'container_format', 'disk_format', 'id', 'min_disk', 'min_ram', 'name', 'size', 'virtual_size', 'status', 'tags', 'owner', 'updated_at', 'visibility', 'protected', 'os_hidden') _available_sort_keys = ('name', 'status', 'container_format', 'disk_format', 'size', 'id', 'created_at', 'updated_at') _default_sort_key = 'created_at' _default_sort_dir = 'desc' _path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}} _supported_operations = ('add', 'remove', 'replace') def __init__(self, schema=None, location_schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() self.location_schema = location_schema or get_location_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) image = {} properties = body tags = properties.pop('tags', []) for key in self._base_properties: try: # NOTE(flwang): Instead of changing the _check_unexpected # of ImageFactory. It would be better to do the mapping # at here. 
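                # e.g. an incoming {'id': <uuid>, 'name': 'x'} becomes
                # {'image_id': <uuid>, 'name': 'x'} for the image factory.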
if key == 'id': image['image_id'] = properties.pop(key) else: image[key] = properties.pop(key) except KeyError: pass # NOTE(abhishekk): Check if custom property key name is less than 255 # characters. Reference LP #1737952 for key in properties: if len(key) > 255: msg = (_("Custom property should not be greater than 255 " "characters.")) raise webob.exc.HTTPBadRequest(explanation=msg) if key in self._reserved_properties: msg = _("Attribute '%s' is reserved.") % key raise webob.exc.HTTPForbidden(msg) if any(key.startswith(ns) for ns in self._reserved_namespaces): msg = _("Attribute '%s' is reserved.") % key raise webob.exc.HTTPForbidden(msg) return dict(image=image, extra_properties=properties, tags=tags) def _get_change_operation_d10(self, raw_change): op = raw_change.get('op') if op is None: msg = (_('Unable to find `op` in JSON Schema change. ' 'It must be one of the following: %(available)s.') % {'available': ', '.join(self._supported_operations)}) raise webob.exc.HTTPBadRequest(explanation=msg) if op not in self._supported_operations: msg = (_('Invalid operation: `%(op)s`. ' 'It must be one of the following: %(available)s.') % {'op': op, 'available': ', '.join(self._supported_operations)}) raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_operation_d4(self, raw_change): op = None for key in self._supported_operations: if key in raw_change: if op is not None: msg = _('Operation objects must contain only one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) op = key if op is None: msg = _('Operation objects must contain exactly one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_path_d10(self, raw_change): try: return raw_change['path'] except KeyError: msg = _("Unable to find '%s' in JSON Schema change") % 'path' raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_path_d4(self, raw_change, op): return raw_change[op] def _decode_json_pointer(self, pointer): """Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._validate_json_pointer(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return ret def _validate_json_pointer(self, pointer): """Validate a json pointer. We only accept a limited form of json pointers. 
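        For example, '/name' and '/os~1glance' are accepted, while ''
        (empty), 'name' (no leading '/'), '/a//b', '/a/' and '/a~2b' are
        all rejected below.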
""" if not pointer.startswith('/'): msg = _('Pointer `%s` does not start with "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if re.search(r'/\s*?/', pointer[1:]): msg = _('Pointer `%s` contains adjacent "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if len(pointer) > 1 and pointer.endswith('/'): msg = _('Pointer `%s` end with "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if pointer[1:].strip() == '/': msg = _('Pointer `%s` does not contains valid token.') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if re.search('~[^01]', pointer) or pointer.endswith('~'): msg = _('Pointer `%s` contains "~" not part of' ' a recognized escape sequence.') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "%s" requires a member named "value".') raise webob.exc.HTTPBadRequest(explanation=msg % op) return raw_change['value'] def _validate_change(self, change): path_root = change['path'][0] if path_root in self._readonly_properties: msg = _("Attribute '%s' is read-only.") % path_root raise webob.exc.HTTPForbidden(explanation=msg) if path_root in self._reserved_properties: msg = _("Attribute '%s' is reserved.") % path_root raise webob.exc.HTTPForbidden(explanation=msg) if any(path_root.startswith(ns) for ns in self._reserved_namespaces): msg = _("Attribute '%s' is reserved.") % path_root raise webob.exc.HTTPForbidden(explanation=msg) if change['op'] == 'remove': return partial_image = None if len(change['path']) == 1: partial_image = {path_root: change['value']} elif ((path_root in get_base_properties().keys()) and (get_base_properties()[path_root].get('type', '') == 'array')): # NOTE(zhiyan): client can use the PATCH API to add an element # directly to an existing property # Such as: 1. using '/locations/N' path to add a location # to the image's 'locations' list at position N. # (implemented) # 2. using '/tags/-' path to append a tag to the # image's 'tags' list at the end. 
            #             (Not implemented)
            partial_image = {path_root: [change['value']]}

        if partial_image:
            try:
                self.schema.validate(partial_image)
            except exception.InvalidObject as e:
                raise webob.exc.HTTPBadRequest(explanation=e.msg)

    def _validate_path(self, op, path):
        path_root = path[0]
        limits = self._path_depth_limits.get(path_root, {})
        if len(path) != limits.get(op, 1):
            msg = _("Invalid JSON pointer for this resource: "
                    "'/%s'") % '/'.join(path)
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def _parse_json_schema_change(self, raw_change, draft_version):
        if draft_version == 10:
            op = self._get_change_operation_d10(raw_change)
            path = self._get_change_path_d10(raw_change)
        elif draft_version == 4:
            op = self._get_change_operation_d4(raw_change)
            path = self._get_change_path_d4(raw_change, op)
        else:
            msg = _('Unrecognized JSON Schema draft version')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        path_list = self._decode_json_pointer(path)
        return op, path_list

    def update(self, request):
        changes = []
        content_types = {
            'application/openstack-images-v2.0-json-patch': 4,
            'application/openstack-images-v2.1-json-patch': 10,
        }
        if request.content_type not in content_types:
            headers = {'Accept-Patch': ', '.join(sorted(content_types.keys()))}
            raise webob.exc.HTTPUnsupportedMediaType(headers=headers)

        json_schema_version = content_types[request.content_type]
        body = self._get_request_body(request)
        if not isinstance(body, list):
            msg = _('Request body must be a JSON array of operation objects.')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        for raw_change in body:
            if not isinstance(raw_change, dict):
                msg = _('Operations must be JSON objects.')
                raise webob.exc.HTTPBadRequest(explanation=msg)

            (op, path) = self._parse_json_schema_change(raw_change,
                                                        json_schema_version)
            # NOTE(zhiyan): the 'path' is a list.
            self._validate_path(op, path)
            change = {'op': op, 'path': path,
                      'json_schema_version': json_schema_version}
            if op != 'remove':
                change['value'] = self._get_change_value(raw_change, op)
            self._validate_change(change)
            changes.append(change)

        return {'changes': changes}

    def _validate_limit(self, limit):
        try:
            limit = int(limit)
        except ValueError:
            msg = _("limit param must be an integer")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if limit < 0:
            msg = _("limit param must be positive")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return limit

    def _validate_sort_key(self, sort_key):
        if sort_key not in self._available_sort_keys:
            msg = _('Invalid sort key: %(sort_key)s. 
' 'It must be one of the following: %(available)s.') % ( {'sort_key': sort_key, 'available': ', '.join(self._available_sort_keys)}) raise webob.exc.HTTPBadRequest(explanation=msg) return sort_key def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _validate_member_status(self, member_status): if member_status not in ['pending', 'accepted', 'rejected', 'all']: msg = _('Invalid status: %s') % member_status raise webob.exc.HTTPBadRequest(explanation=msg) return member_status def _get_filters(self, filters): visibility = filters.get('visibility') if visibility: if visibility not in ['community', 'public', 'private', 'shared', 'all']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) changes_since = filters.get('changes-since') if changes_since: msg = _('The "changes-since" filter is no longer available on v2.') raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _get_sorting_params(self, params): """ Process sorting params. Currently glance supports two sorting syntax: classic and new one, that is uniform for all OpenStack projects. Classic syntax: sort_key=name&sort_dir=asc&sort_key=size&sort_dir=desc New syntax: sort=name:asc,size:desc """ sort_keys = [] sort_dirs = [] if 'sort' in params: # use new sorting syntax here if 'sort_key' in params or 'sort_dir' in params: msg = _('Old and new sorting syntax cannot be combined') raise webob.exc.HTTPBadRequest(explanation=msg) for sort_param in params.pop('sort').strip().split(','): key, _sep, dir = sort_param.partition(':') if not dir: dir = self._default_sort_dir sort_keys.append(self._validate_sort_key(key.strip())) sort_dirs.append(self._validate_sort_dir(dir.strip())) else: # continue with classic syntax # NOTE(mfedosin): we have 3 options here: # 1. sort_dir wasn't passed: we use default one - 'desc'. # 2. Only one sort_dir was passed: use it for every sort_key # in the list. # 3. Multiple sort_dirs were passed: consistently apply each one to # the corresponding sort_key. # If number of sort_dirs and sort_keys doesn't match then raise an # exception. while 'sort_key' in params: sort_keys.append(self._validate_sort_key( params.pop('sort_key').strip())) while 'sort_dir' in params: sort_dirs.append(self._validate_sort_dir( params.pop('sort_dir').strip())) if sort_dirs: dir_len = len(sort_dirs) key_len = len(sort_keys) if dir_len > 1 and dir_len != key_len: msg = _('Number of sort dirs does not match the number ' 'of sort keys') raise webob.exc.HTTPBadRequest(explanation=msg) if not sort_keys: sort_keys = [self._default_sort_key] if not sort_dirs: sort_dirs = [self._default_sort_dir] return sort_keys, sort_dirs def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) member_status = params.pop('member_status', 'accepted') # NOTE (flwang) To avoid using comma or any predefined chars to split # multiple tags, now we allow user specify multiple 'tag' parameters # in URL, such as v2/images?tag=x86&tag=64bit. 
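        # e.g. GET /v2/images?tag=x86&tag=64bit&sort=name:asc,size:desc
        # yields tags == ['x86', '64bit'] below, while
        # _get_sorting_params() turns the sort expression into
        # sort_keys=['name', 'size'] and sort_dirs=['asc', 'desc'].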
tags = [] while 'tag' in params: tags.append(params.pop('tag').strip()) query_params = { 'filters': self._get_filters(params), 'member_status': self._validate_member_status(member_status), } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) if tags: query_params['filters']['tags'] = tags # NOTE(mfedosin): param is still called sort_key and sort_dir, # instead of sort_keys and sort_dirs respectively. # It's done because in v1 it's still a single value. query_params['sort_key'], query_params['sort_dir'] = ( self._get_sorting_params(params)) return query_params def _validate_import_body(self, body): # TODO(rosmaita): do schema validation of body instead # of this ad-hoc stuff try: method = body['method'] except KeyError: msg = _("Import request requires a 'method' field.") raise webob.exc.HTTPBadRequest(explanation=msg) try: method_name = method['name'] except KeyError: msg = _("Import request requires a 'name' field.") raise webob.exc.HTTPBadRequest(explanation=msg) if method_name not in CONF.enabled_import_methods: msg = _("Unknown import method name '%s'.") % method_name raise webob.exc.HTTPBadRequest(explanation=msg) # Validate 'all_stores_must_succeed' and 'all_stores' all_stores_must_succeed = body.get('all_stores_must_succeed', True) if not isinstance(all_stores_must_succeed, bool): msg = (_("'all_stores_must_succeed' must be boolean value only")) raise webob.exc.HTTPBadRequest(explanation=msg) all_stores = body.get('all_stores', False) if not isinstance(all_stores, bool): msg = (_("'all_stores' must be boolean value only")) raise webob.exc.HTTPBadRequest(explanation=msg) def import_image(self, request): body = self._get_request_body(request) self._validate_import_body(body) return {'body': body} def add_location(self, request): body = self._get_request_body(request) values = {'add_location': body} try: self.location_schema.validate(values) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) return {'body': body} class ResponseSerializer(wsgi.JSONResponseSerializer): # These properties will be filtered out from the response and not # exposed to the client _hidden_properties = ['os_glance_stage_host'] def __init__(self, schema=None, location_schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() self.location_schema = location_schema or get_location_schema() def _get_image_href(self, image, subcollection=''): base_href = '/v2/images/%s' % image.image_id if subcollection: base_href = '%s/%s' % (base_href, subcollection) return base_href def _format_image(self, image): def _get_image_locations(image): try: return list(image.locations) except exception.Forbidden: return [] try: image_view = {k: v for k, v in dict(image.extra_properties).items() if k not in self._hidden_properties} attributes = ['name', 'disk_format', 'container_format', 'visibility', 'size', 'virtual_size', 'status', 'checksum', 'protected', 'min_ram', 'min_disk', 'owner', 'os_hidden', 'os_hash_algo', 'os_hash_value'] for key in attributes: image_view[key] = getattr(image, key) image_view['id'] = image.image_id image_view['created_at'] = timeutils.isotime(image.created_at) image_view['updated_at'] = timeutils.isotime(image.updated_at) if CONF.show_multiple_locations: locations = _get_image_locations(image) if locations: image_view['locations'] = [] for loc in locations: tmp = dict(loc) tmp.pop('id', None) tmp.pop('status', None) image_view['locations'].append(tmp) else: # 
NOTE (flwang): We will still show "locations": [] if # image.locations is None to indicate it's allowed to show # locations but it's just non-existent. image_view['locations'] = [] LOG.debug("The 'locations' list of image %s is empty", image.image_id) if CONF.show_image_direct_url: locations = _get_image_locations(image) if locations: # Choose best location configured strategy loc = utils.sort_image_locations(locations)[0] image_view['direct_url'] = loc['url'] else: LOG.debug("The 'locations' list of image %s is empty, " "not including 'direct_url' in response", image.image_id) image_view['tags'] = list(image.tags) image_view['self'] = self._get_image_href(image) image_view['file'] = self._get_image_href(image, 'file') image_view['schema'] = '/v2/schemas/image' image_view = self.schema.filter(image_view) # domain # add store information to image if CONF.enabled_backends: locations = _get_image_locations(image) if locations: stores = [] for loc in locations: backend = loc['metadata'].get('store') if backend: stores.append(backend) if stores: image_view['stores'] = ",".join(stores) return image_view except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) def create(self, response, image): response.status_int = http.CREATED self.show(response, image) response.location = self._get_image_href(image) # according to RFC7230, headers should not have empty fields # see http://httpwg.org/specs/rfc7230.html#field.components if CONF.enabled_import_methods: import_methods = ("OpenStack-image-import-methods", ','.join(CONF.enabled_import_methods)) response.headerlist.append(import_methods) if CONF.enabled_backends: enabled_backends = ("OpenStack-image-store-ids", ','.join(CONF.enabled_backends.keys())) response.headerlist.append(enabled_backends) def show(self, response, image): image_view = self._format_image(image) response.unicode_body = json.dumps(image_view, ensure_ascii=False) response.content_type = 'application/json' def update(self, response, image): image_view = self._format_image(image) response.unicode_body = json.dumps(image_view, ensure_ascii=False) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) body = { 'images': [self._format_image(i) for i in result['images']], 'first': '/v2/images', 'schema': '/v2/schemas/images', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urlparse.urlencode(params) body['next'] = '/v2/images?%s' % next_query response.unicode_body = json.dumps(body, ensure_ascii=False) response.content_type = 'application/json' def delete_from_store(self, response, result): response.status_int = http.NO_CONTENT def delete(self, response, result): response.status_int = http.NO_CONTENT def import_image(self, response, result): response.status_int = http.ACCEPTED def add_location(self, response, result): response.status_int = http.ACCEPTED def get_base_properties(): return { 'id': { 'type': 'string', 'description': _('An identifier for the image'), 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'name': { 'type': ['null', 'string'], 'description': _('Descriptive name for the image'), 'maxLength': 255, }, 'status': { 'type': 'string', 'readOnly': True, 'description': _('Status of the image'), 'enum': ['queued', 'saving', 'active', 'killed', 'deleted', 'uploading', 
'importing', 'pending_delete', 'deactivated'], }, 'visibility': { 'type': 'string', 'description': _('Scope of image accessibility'), 'enum': ['community', 'public', 'private', 'shared'], }, 'protected': { 'type': 'boolean', 'description': _('If true, image will not be deletable.'), }, 'os_hidden': { 'type': 'boolean', 'description': _('If true, image will not appear in default ' 'image list response.'), }, 'checksum': { 'type': ['null', 'string'], 'readOnly': True, 'description': _('md5 hash of image contents.'), 'maxLength': 32, }, 'os_hash_algo': { 'type': ['null', 'string'], 'readOnly': True, 'description': _('Algorithm to calculate the os_hash_value'), 'maxLength': 64, }, 'os_hash_value': { 'type': ['null', 'string'], 'readOnly': True, 'description': _('Hexdigest of the image contents using the ' 'algorithm specified by the os_hash_algo'), 'maxLength': 128, }, 'owner': { 'type': ['null', 'string'], 'description': _('Owner of the image'), 'maxLength': 255, }, 'size': { 'type': ['null', 'integer'], 'readOnly': True, 'description': _('Size of image file in bytes'), }, 'virtual_size': { 'type': ['null', 'integer'], 'readOnly': True, 'description': _('Virtual size of image in bytes'), }, 'container_format': { 'type': ['null', 'string'], 'description': _('Format of the container'), 'enum': [None] + CONF.image_format.container_formats, }, 'disk_format': { 'type': ['null', 'string'], 'description': _('Format of the disk'), 'enum': [None] + CONF.image_format.disk_formats, }, 'created_at': { 'type': 'string', 'readOnly': True, 'description': _('Date and time of image registration' ), # TODO(bcwaldon): our jsonschema library doesn't seem to like the # format attribute, figure out why! # 'format': 'date-time', }, 'updated_at': { 'type': 'string', 'readOnly': True, 'description': _('Date and time of the last image modification' ), # 'format': 'date-time', }, 'tags': { 'type': 'array', 'description': _('List of strings related to the image'), 'items': { 'type': 'string', 'maxLength': 255, }, }, 'direct_url': { 'type': 'string', 'readOnly': True, 'description': _('URL to access the image file kept in external ' 'store'), }, 'min_ram': { 'type': 'integer', 'description': _('Amount of ram (in MB) required to boot image.'), }, 'min_disk': { 'type': 'integer', 'description': _('Amount of disk space (in GB) required to boot ' 'image.'), }, 'self': { 'type': 'string', 'readOnly': True, 'description': _('An image self url'), }, 'file': { 'type': 'string', 'readOnly': True, 'description': _('An image file url'), }, 'stores': { 'type': 'string', 'readOnly': True, 'description': _('Store in which image data resides. Only ' 'present when the operator has enabled multiple ' 'stores. May be a comma-separated list of store ' 'identifiers.'), }, 'schema': { 'type': 'string', 'readOnly': True, 'description': _('An image schema url'), }, 'locations': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'url': { 'type': 'string', 'maxLength': 255, }, 'metadata': { 'type': 'object', }, 'validation_data': { 'description': _( 'Values to be used to populate the corresponding ' 'image properties. If the image status is not ' '\'queued\', values must exactly match those ' 'already contained in the image properties.' 
), 'type': 'object', 'writeOnly': True, 'additionalProperties': False, 'properties': { 'checksum': { 'type': 'string', 'minLength': 32, 'maxLength': 32, }, 'os_hash_algo': { 'type': 'string', 'maxLength': 64, }, 'os_hash_value': { 'type': 'string', 'maxLength': 128, }, }, 'required': [ 'os_hash_algo', 'os_hash_value', ], }, }, 'required': ['url', 'metadata'], }, 'description': _('A set of URLs to access the image file kept in ' 'external store'), }, } def get_add_location_properties(): return { 'add_location': { 'type': 'object', 'description': _('Values of location url, do_secure_hash and ' 'validation_data for new add location API'), 'properties': { 'url': { 'type': 'string', 'readOnly': True, 'description': _('The URL of the new location to be ' 'added in the image.') }, 'validation_data': { 'description': _('Values to be used to populate the ' 'corresponding image properties.' 'do_secure_hash is not True then ' 'image checksum and hash will not be ' 'calculated so it is the responsibility' ' of the consumer of location ADD API ' 'to provide the correct values in the ' 'validation_data parameter'), 'type': 'object', 'writeOnly': True, 'additionalProperties': False, 'properties': { 'os_hash_algo': { 'type': 'string', 'maxLength': 64, 'enum': ['sha1', 'sha256', 'sha512', 'md5'], }, 'os_hash_value': { 'type': 'string', 'maxLength': 128, }, }, 'dependentRequired': { "os_hash_value": ["os_hash_algo"], "os_hash_algo": ["os_hash_value"] }, }, }, 'required': ['url'], }, } def _get_base_links(): return [ {'rel': 'self', 'href': '{self}'}, {'rel': 'enclosure', 'href': '{file}'}, {'rel': 'describedby', 'href': '{schema}'}, ] def get_schema(custom_properties=None): properties = get_base_properties() links = _get_base_links() schema = glance.schema.PermissiveSchema('image', properties, links) if custom_properties: for property_value in custom_properties.values(): property_value['is_base'] = False schema.merge_properties(custom_properties) return schema def get_location_schema(): properties = get_add_location_properties() schema = glance.schema.PermissiveSchema('location', properties) return schema def get_collection_schema(custom_properties=None): image_schema = get_schema(custom_properties) return glance.schema.CollectionSchema('images', image_schema) def load_custom_properties(): """Find the schema properties files and load them into a dict.""" filename = 'schema-image.json' match = CONF.find_file(filename) if match: with open(match, 'r') as schema_file: schema_data = schema_file.read() return json.loads(schema_data) else: msg = (_LW('Could not find schema properties file %s. Continuing ' 'without custom properties') % filename) LOG.warning(msg) return {} def create_resource(custom_properties=None): """Images resource factory method""" schema = get_schema(custom_properties) location_schema = get_location_schema() deserializer = RequestDeserializer(schema, location_schema) serializer = ResponseSerializer(schema, location_schema) controller = ImagesController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/metadef_namespaces.py0000664000175000017500000011222000000000000021563 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http import urllib.parse as urlparse from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_namespace import Namespace from glance.api.v2.model.metadef_namespace import Namespaces from glance.api.v2.model.metadef_object import MetadefObject from glance.api.v2.model.metadef_property_type import PropertyType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_tag import MetadefTag from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import utils from glance.common import wsgi from glance.common import wsme_utils import glance.db import glance.gateway from glance.i18n import _, _LE import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF class NamespaceController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) self.ns_schema_link = '/v2/schemas/metadefs/namespace' self.obj_schema_link = '/v2/schemas/metadefs/object' self.tag_schema_link = '/v2/schemas/metadefs/tag' def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): try: ns_repo = self.gateway.get_metadef_namespace_repo(req.context) policy_check = api_policy.MetadefAPIPolicy( req.context, enforcer=self.policy) # NOTE(abhishekk): This is just a "do you have permission to # list namespace" check. Each namespace is checked against # get_metadef_namespace below. policy_check.get_metadef_namespaces() # NOTE(abhishekk): We also need to fetch resource_types associated # with namespaces, so better to check we have permission for the # same in advance. policy_check.list_metadef_resource_types() # Get namespace id if marker: namespace_obj = ns_repo.get(marker) marker = namespace_obj.namespace_id database_ns_list = ns_repo.list( marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters) ns_list = [ ns for ns in database_ns_list if api_policy.MetadefAPIPolicy( req.context, md_resource=ns, enforcer=self.policy).check( 'get_metadef_namespace')] rs_repo = ( self.gateway.get_metadef_resource_type_repo(req.context)) for db_namespace in ns_list: # Get resource type associations filters = dict() filters['namespace'] = db_namespace.namespace try: repo_rs_type_list = rs_repo.list(filters=filters) except exception.NotFound: # NOTE(danms): If we fail to list resource_types # for this namespace, do not fail the entire # namespace list operation with NotFound. 
repo_rs_type_list = [] resource_type_list = [ ResourceTypeAssociation.to_wsme_model( resource_type ) for resource_type in repo_rs_type_list] if resource_type_list: db_namespace.resource_type_associations = ( resource_type_list) namespace_list = [Namespace.to_wsme_model( db_namespace, get_namespace_href(db_namespace), self.ns_schema_link) for db_namespace in ns_list] namespaces = Namespaces() namespaces.namespaces = namespace_list if len(namespace_list) != 0 and len(namespace_list) == limit: namespaces.next = ns_list[-1].namespace except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata namespaces " "index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return namespaces @utils.mutating def create(self, req, namespace): try: namespace_created = False # Create Namespace ns_factory = self.gateway.get_metadef_namespace_factory( req.context) ns_repo = self.gateway.get_metadef_namespace_repo(req.context) # NOTE(abhishekk): Here we are going to check if user is authorized # to create namespace, resource_types, objects, properties etc. policy_check = api_policy.MetadefAPIPolicy( req.context, enforcer=self.policy) policy_check.add_metadef_namespace() if namespace.resource_type_associations: policy_check.add_metadef_resource_type_association() if namespace.objects: policy_check.add_metadef_object() if namespace.properties: policy_check.add_metadef_property() if namespace.tags: policy_check.add_metadef_tag() # NOTE(abhishekk): As we are getting rid of auth layer, this # is the place where we should add owner if it is not specified # in request. kwargs = namespace.to_dict() if 'owner' not in kwargs: kwargs.update({'owner': req.context.owner}) new_namespace = ns_factory.new_namespace(**kwargs) ns_repo.add(new_namespace) namespace_created = True # Create Resource Types if namespace.resource_type_associations: rs_factory = (self.gateway.get_metadef_resource_type_factory( req.context)) rs_repo = self.gateway.get_metadef_resource_type_repo( req.context) for resource_type in namespace.resource_type_associations: new_resource = rs_factory.new_resource_type( namespace=namespace.namespace, **resource_type.to_dict()) rs_repo.add(new_resource) # Create Objects if namespace.objects: object_factory = self.gateway.get_metadef_object_factory( req.context) object_repo = self.gateway.get_metadef_object_repo( req.context) for metadata_object in namespace.objects: new_meta_object = object_factory.new_object( namespace=namespace.namespace, **metadata_object.to_dict()) object_repo.add(new_meta_object) # Create Tags if namespace.tags: tag_factory = self.gateway.get_metadef_tag_factory( req.context) tag_repo = self.gateway.get_metadef_tag_repo( req.context) for metadata_tag in namespace.tags: new_meta_tag = tag_factory.new_tag( namespace=namespace.namespace, **metadata_tag.to_dict()) tag_repo.add(new_meta_tag) # Create Namespace Properties if namespace.properties: prop_factory = (self.gateway.get_metadef_property_factory( req.context)) prop_repo = self.gateway.get_metadef_property_repo( req.context) for (name, value) in namespace.properties.items(): new_property_type = ( prop_factory.new_namespace_property( namespace=namespace.namespace, **self._to_property_dict(name, value) )) prop_repo.add(new_property_type) except exception.Invalid as e: msg = (_("Couldn't create metadata namespace: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.Forbidden as e: 
            self._cleanup_namespace(ns_repo, namespace, namespace_created)
            LOG.debug("User not permitted to create metadata namespace")
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            self._cleanup_namespace(ns_repo, namespace, namespace_created)
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            self._cleanup_namespace(ns_repo, namespace, namespace_created)
            raise webob.exc.HTTPConflict(explanation=e.msg)

        # Return the user namespace as we don't expose the id to user
        new_namespace.properties = namespace.properties
        new_namespace.objects = namespace.objects
        new_namespace.resource_type_associations = (
            namespace.resource_type_associations)
        new_namespace.tags = namespace.tags
        return Namespace.to_wsme_model(new_namespace,
                                       get_namespace_href(new_namespace),
                                       self.ns_schema_link)

    def _to_property_dict(self, name, value):
        # Convert the model PropertyTypes dict to a JSON string
        db_property_type_dict = dict()
        db_property_type_dict['schema'] = json.tojson(PropertyType, value)
        db_property_type_dict['name'] = name
        return db_property_type_dict

    def _cleanup_namespace(self, namespace_repo, namespace,
                           namespace_created):
        if namespace_created:
            try:
                namespace_obj = namespace_repo.get(namespace.namespace)
                namespace_obj.delete()
                namespace_repo.remove(namespace_obj)
                LOG.debug("Cleaned up namespace %(namespace)s ",
                          {'namespace': namespace.namespace})
            except Exception as e:
                LOG.error(_LE("Failed to delete namespace %(namespace)s. "
                              "Exception: %(exception)s"),
                          {'namespace': namespace.namespace,
                           'exception': encodeutils.exception_to_unicode(e)})

    def show(self, req, namespace, filters=None):
        try:
            # Get namespace
            ns_repo = self.gateway.get_metadef_namespace_repo(
                req.context)
            try:
                namespace_obj = ns_repo.get(namespace)
                policy_check = api_policy.MetadefAPIPolicy(
                    req.context, md_resource=namespace_obj,
                    enforcer=self.policy)
                policy_check.get_metadef_namespace()
            except (exception.Forbidden, webob.exc.HTTPForbidden):
                LOG.debug("User not permitted to show namespace '%s'",
                          namespace)
                # NOTE (abhishekk): Returning 404 Not Found as the
                # namespace is outside of this user's project
                raise webob.exc.HTTPNotFound()

            # NOTE(abhishekk): We also need to fetch resource_types, objects,
            # properties, tags associated with namespace, so better to check
            # whether user has permissions for the same.
            policy_check.list_metadef_resource_types()
            policy_check.get_metadef_objects()
            policy_check.get_metadef_properties()
            policy_check.get_metadef_tags()

            namespace_detail = Namespace.to_wsme_model(
                namespace_obj,
                get_namespace_href(namespace_obj),
                self.ns_schema_link)
            ns_filters = dict()
            ns_filters['namespace'] = namespace

            # Get objects
            object_repo = self.gateway.get_metadef_object_repo(req.context)
            db_metaobject_list = object_repo.list(filters=ns_filters)
            object_list = [MetadefObject.to_wsme_model(
                db_metaobject,
                get_object_href(namespace, db_metaobject),
                self.obj_schema_link)
                for db_metaobject in db_metaobject_list]
            if object_list:
                namespace_detail.objects = object_list

            # Get resource type associations
            rs_repo = self.gateway.get_metadef_resource_type_repo(
                req.context)
            db_resource_type_list = rs_repo.list(filters=ns_filters)
            resource_type_list = [ResourceTypeAssociation.to_wsme_model(
                resource_type) for resource_type in db_resource_type_list]
            if resource_type_list:
                namespace_detail.resource_type_associations = (
                    resource_type_list)

            # Get properties
            prop_repo = self.gateway.get_metadef_property_repo(req.context)
            db_properties = prop_repo.list(filters=ns_filters)
            property_list = Namespace.to_model_properties(db_properties)
            if property_list:
                namespace_detail.properties = property_list

            if filters and filters['resource_type']:
                namespace_detail = self._prefix_property_name(
                    namespace_detail, filters['resource_type'])

            # Get tags
            tag_repo = self.gateway.get_metadef_tag_repo(req.context)
            db_metatag_list = tag_repo.list(filters=ns_filters)
            tag_list = [MetadefTag(**{'name': db_metatag.name})
                        for db_metatag in db_metatag_list]
            if tag_list:
                namespace_detail.tags = tag_list

        except exception.Forbidden as e:
            LOG.debug("User not permitted to show metadata namespace "
                      "'%s'", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

        return namespace_detail

    def update(self, req, user_ns, namespace):
        namespace_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            ns_obj = namespace_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Here we are just checking if the user is
            # authorized to modify the namespace or not
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=ns_obj,
                enforcer=self.policy).modify_metadef_namespace()

            ns_obj._old_namespace = ns_obj.namespace
            ns_obj.namespace = wsme_utils._get_value(user_ns.namespace)
            ns_obj.display_name = wsme_utils._get_value(user_ns.display_name)
            ns_obj.description = wsme_utils._get_value(user_ns.description)
            # Following optional fields will default to same values as in
            # create namespace if not specified
            ns_obj.visibility = (
                wsme_utils._get_value(user_ns.visibility) or 'private')
            ns_obj.protected = (
                wsme_utils._get_value(user_ns.protected) or False)
            ns_obj.owner = (
                wsme_utils._get_value(user_ns.owner) or req.context.owner)
            updated_namespace = namespace_repo.save(ns_obj)
        except exception.Invalid as e:
            msg = (_("Couldn't update metadata namespace: %s")
                   % encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to update metadata namespace "
                      "'%s'", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)

        return Namespace.to_wsme_model(updated_namespace,
                                       get_namespace_href(updated_namespace),
                                       self.ns_schema_link)

    def delete(self, req, namespace):
        namespace_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = namespace_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Here we are just checking if the user is
            # authorized to delete the namespace or not.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).delete_metadef_namespace()

            namespace_obj.delete()
            namespace_repo.remove(namespace_obj)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata namespace "
                      "'%s'", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

    def delete_objects(self, req, namespace):
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): This call currently checks whether user
            # has permission to delete the namespace or not before deleting
            # the objects associated with it.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).delete_metadef_namespace()

            namespace_obj.delete()
            ns_repo.remove_objects(namespace_obj)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata objects "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

    def delete_tags(self, req, namespace):
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): This call currently checks whether user
            # has permission to delete the namespace or not before deleting
            # the tags associated with it.
            policy_check = api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy)
            policy_check.delete_metadef_namespace()
            # NOTE(abhishekk): This call checks whether user
            # has permission to delete the tags or not.
policy_check.delete_metadef_tags() namespace_obj.delete() ns_repo.remove_tags(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata tags " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def delete_properties(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): This call currently checks whether user # has permission to delete the namespace or not before deleting # the objects associated with it. api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).delete_metadef_namespace() namespace_obj.delete() ns_repo.remove_properties(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata properties " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def _prefix_property_name(self, namespace_detail, user_resource_type): prefix = None if user_resource_type and namespace_detail.resource_type_associations: for resource_type in namespace_detail.resource_type_associations: if resource_type.name == user_resource_type: prefix = resource_type.prefix break if prefix: if namespace_detail.properties: new_property_dict = dict() for (key, value) in namespace_detail.properties.items(): new_property_dict[prefix + key] = value namespace_detail.properties = new_property_dict if namespace_detail.objects: for object in namespace_detail.objects: new_object_property_dict = dict() for (key, value) in object.properties.items(): new_object_property_dict[prefix + key] = value object.properties = new_object_property_dict if object.required and len(object.required) > 0: required = [prefix + name for name in object.required] object.required = required return namespace_detail class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, int(limit)) query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params def _validate_sort_dir(self, sort_dir): if sort_dir not in 
['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.get('visibility') if visibility: if visibility not in ['public', 'private']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def show(self, request): params = request.params.copy() query_params = { 'filters': self._get_filters(params) } return query_params def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) namespace = json.fromjson(Namespace, body) return dict(namespace=namespace) def update(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) namespace = json.fromjson(Namespace, body) return dict(user_ns=namespace) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema def create(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response, http.CREATED) response.location = get_namespace_href(namespace) def show(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response) def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) result.first = "/v2/metadefs/namespaces" result.schema = "/v2/schemas/metadefs/namespaces" if query: result.first = '%s?%s' % (result.first, query) if result.next: params['marker'] = result.next next_query = urlparse.urlencode(params) result.next = '/v2/metadefs/namespaces?%s' % next_query ns_json = json.tojson(Namespaces, result) response = self.__render(ns_json, response) def update(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response, http.OK) def delete(self, response, result): response.status_int = http.NO_CONTENT def delete_objects(self, response, result): response.status_int = http.NO_CONTENT def delete_properties(self, response, result): response.status_int = http.NO_CONTENT def delete_tags(self, response, result): response.status_int = http.NO_CONTENT def __render(self, json_data, response, response_status=None): body = jsonutils.dumps(json_data, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' if response_status: response.status_int = response_status return response def _get_base_definitions(): return get_schema_definitions() def get_schema_definitions(): return { "positiveInteger": { "type": "integer", "minimum": 0 }, "positiveIntegerDefault0": { "allOf": [ {"$ref": "#/definitions/positiveInteger"}, {"default": 0} ] }, "stringArray": { "type": "array", "items": {"type": "string"}, # "minItems": 1, "uniqueItems": True }, "property": { "type": "object", "additionalProperties": { "type": "object", "required": ["title", "type"], "properties": { 
"name": { "type": "string", "maxLength": 80 }, "title": { "type": "string" }, "description": { "type": "string" }, "operators": { "type": "array", "items": { "type": "string" } }, "type": { "type": "string", "enum": [ "array", "boolean", "integer", "number", "object", "string", None ] }, "required": { "$ref": "#/definitions/stringArray" }, "minimum": { "type": "number" }, "maximum": { "type": "number" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "pattern": { "type": "string", "format": "regex" }, "enum": { "type": "array" }, "readonly": { "type": "boolean" }, "default": {}, "items": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "array", "boolean", "integer", "number", "object", "string", None ] }, "enum": { "type": "array" } } }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "uniqueItems": { "type": "boolean", "default": False }, "additionalItems": { "type": "boolean" }, } } } } def _get_base_properties(): return { "namespace": { "type": "string", "description": _("The unique namespace text."), "maxLength": 80, }, "display_name": { "type": "string", "description": _("The user friendly name for the namespace. Used " "by UI if available."), "maxLength": 80, }, "description": { "type": "string", "description": _("Provides a user friendly description of the " "namespace."), "maxLength": 500, }, "visibility": { "type": "string", "description": _("Scope of namespace accessibility."), "enum": ["public", "private"], }, "protected": { "type": "boolean", "description": _("If true, namespace will not be deletable."), }, "owner": { "type": "string", "description": _("Owner of the namespace."), "maxLength": 255, }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of namespace creation"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last namespace" " modification"), "format": "date-time" }, "schema": { 'readOnly': True, "type": "string" }, "self": { 'readOnly': True, "type": "string" }, "resource_type_associations": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "prefix": { "type": "string" }, "properties_target": { "type": "string" } } } }, "properties": { "$ref": "#/definitions/property" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "description": { "type": "string" }, "required": { "$ref": "#/definitions/stringArray" }, "properties": { "$ref": "#/definitions/property" }, } } }, "tags": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } } } }, } def get_schema(): properties = _get_base_properties() definitions = _get_base_definitions() mandatory_attrs = Namespace.get_mandatory_attrs() schema = glance.schema.Schema( 'namespace', properties, required=mandatory_attrs, definitions=definitions ) return schema def get_collection_schema(): namespace_schema = get_schema() return glance.schema.CollectionSchema('namespaces', namespace_schema) def get_namespace_href(namespace): base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace return base_href def get_object_href(namespace_name, metadef_object): base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadef_object.name)) return base_href def get_tag_href(namespace_name, metadef_tag): base_href = 
('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadef_tag.name)) return base_href def create_resource(): """Namespaces resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = NamespaceController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/metadef_objects.py0000664000175000017500000004336300000000000021110 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2 import metadef_namespaces as namespaces import glance.api.v2.metadef_properties as properties from glance.api.v2.model.metadef_object import MetadefObject from glance.api.v2.model.metadef_object import MetadefObjects from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import wsgi from glance.common import wsme_utils import glance.db from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) class MetadefObjectsController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) self.obj_schema_link = '/v2/schemas/metadefs/object' def create(self, req, metadata_object, namespace): object_factory = self.gateway.get_metadef_object_factory(req.context) object_repo = self.gateway.get_metadef_object_repo(req.context) try: ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: # NOTE(abhishekk): Verifying that namespace is visible # to user namespace_obj = ns_repo.get(namespace) except exception.Forbidden: # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise exception.NotFound(msg) # NOTE(abhishekk): Metadef object is created for Metadef namespaces # Here we are just checking if user is authorized to create metadef # object or not. 
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).add_metadef_object()

            new_meta_object = object_factory.new_object(
                namespace=namespace,
                **metadata_object.to_dict())
            object_repo.add(new_meta_object)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata object within "
                      "'%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.Invalid as e:
            msg = (_("Couldn't create metadata object: %s")
                   % encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        return MetadefObject.to_wsme_model(
            new_meta_object,
            get_object_href(namespace, new_meta_object),
            self.obj_schema_link)

    def index(self, req, namespace, marker=None, limit=None,
              sort_key='created_at', sort_dir='desc', filters=None):
        try:
            ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
            try:
                namespace_obj = ns_repo.get(namespace)
            except exception.Forbidden:
                # NOTE (abhishekk): Returning 404 Not Found as the
                # namespace is outside of this user's project
                msg = _("Namespace %s not found") % namespace
                raise exception.NotFound(msg)

            # NOTE(abhishekk): This is just a "do you have permission to
            # list objects" check. Each object is checked against
            # get_metadef_object below.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).get_metadef_objects()

            filters = filters or dict()
            filters['namespace'] = namespace
            object_repo = self.gateway.get_metadef_object_repo(req.context)
            db_metaobject_list = object_repo.list(
                marker=marker, limit=limit, sort_key=sort_key,
                sort_dir=sort_dir, filters=filters)

            object_list = [
                MetadefObject.to_wsme_model(
                    obj,
                    get_object_href(namespace, obj),
                    self.obj_schema_link
                ) for obj in db_metaobject_list
                if api_policy.MetadefAPIPolicy(
                    req.context,
                    md_resource=obj.namespace,
                    enforcer=self.policy
                ).check('get_metadef_object')]

            metadef_objects = MetadefObjects()
            metadef_objects.objects = object_list
        except exception.Forbidden as e:
            LOG.debug("User not permitted to retrieve metadata objects "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        return metadef_objects

    def show(self, req, namespace, object_name):
        meta_object_repo = self.gateway.get_metadef_object_repo(req.context)
        try:
            ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
            try:
                namespace_obj = ns_repo.get(namespace)
            except exception.Forbidden:
                # NOTE (abhishekk): Returning 404 Not Found as the
                # namespace is outside of this user's project
                msg = _("Namespace %s not found") % namespace
                raise exception.NotFound(msg)

            # NOTE(abhishekk): Metadef objects are associated with
            # namespace, so made provision to pass namespace here
            # for visibility check
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).get_metadef_object()

            metadef_object = meta_object_repo.get(namespace, object_name)
            return MetadefObject.to_wsme_model(
                metadef_object,
                get_object_href(namespace, metadef_object),
                self.obj_schema_link)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to show metadata object '%s' "
                      "within '%s' namespace", object_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

    def update(self, req,
metadata_object, namespace, object_name): meta_repo = self.gateway.get_metadef_object_repo(req.context) try: ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: # NOTE(abhishekk): Verifying that namespace is visible # to user namespace_obj = ns_repo.get(namespace) except exception.Forbidden: # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise exception.NotFound(msg) # NOTE(abhishekk): Metadef object is created for Metadef namespaces # Here we are just checking if user is authorized to modify metadef # object or not. api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).modify_metadef_object() metadef_object = meta_repo.get(namespace, object_name) metadef_object._old_name = metadef_object.name metadef_object.name = wsme_utils._get_value( metadata_object.name) metadef_object.description = wsme_utils._get_value( metadata_object.description) metadef_object.required = wsme_utils._get_value( metadata_object.required) metadef_object.properties = wsme_utils._get_value( metadata_object.properties) updated_metadata_obj = meta_repo.save(metadef_object) except exception.Invalid as e: msg = (_("Couldn't update metadata object: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.Forbidden as e: LOG.debug("User not permitted to update metadata object '%s' " "within '%s' namespace ", object_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) return MetadefObject.to_wsme_model( updated_metadata_obj, get_object_href(namespace, updated_metadata_obj), self.obj_schema_link) def delete(self, req, namespace, object_name): meta_repo = self.gateway.get_metadef_object_repo(req.context) try: ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: # NOTE(abhishekk): Verifying that namespace is visible # to user namespace_obj = ns_repo.get(namespace) except exception.Forbidden: # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise exception.NotFound(msg) # NOTE(abhishekk): Metadef object is created for Metadef namespaces # Here we are just checking if user is authorized to delete metadef # object or not. 
api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).delete_metadef_object() metadef_object = meta_repo.get(namespace, object_name) metadef_object.delete() meta_repo.remove(metadef_object) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata object '%s' " "within '%s' namespace", object_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def _get_base_definitions(): return namespaces.get_schema_definitions() def _get_base_properties(): return { "name": { "type": "string", "maxLength": 80 }, "description": { "type": "string" }, "required": { "$ref": "#/definitions/stringArray" }, "properties": { "$ref": "#/definitions/property" }, "schema": { 'readOnly': True, "type": "string" }, "self": { 'readOnly': True, "type": "string" }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of object creation"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last object modification"), "format": "date-time" } } def get_schema(): definitions = _get_base_definitions() properties = _get_base_properties() mandatory_attrs = MetadefObject.get_mandatory_attrs() schema = glance.schema.Schema( 'object', properties, required=mandatory_attrs, definitions=definitions, ) return schema def get_collection_schema(): object_schema = get_schema() return glance.schema.CollectionSchema('objects', object_schema) class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) if 'properties' in body: for propertyname in body['properties']: schema = properties.get_schema(require_name=False) schema.validate(body['properties'][propertyname]) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) metadata_object = json.fromjson(MetadefObject, body) return dict(metadata_object=metadata_object) def update(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) metadata_object = json.fromjson(MetadefObject, body) return dict(metadata_object=metadata_object) def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.get('visibility') if 
visibility: if visibility not in ['public', 'private', 'shared']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit <= 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() def create(self, response, metadata_object): response.status_int = http.CREATED self.show(response, metadata_object) def show(self, response, metadata_object): metadata_object_json = json.tojson(MetadefObject, metadata_object) body = jsonutils.dumps(metadata_object_json, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def update(self, response, metadata_object): response.status_int = http.OK self.show(response, metadata_object) def index(self, response, result): result.schema = "v2/schemas/metadefs/objects" metadata_objects_json = json.tojson(MetadefObjects, result) body = jsonutils.dumps(metadata_objects_json, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def delete(self, response, result): response.status_int = http.NO_CONTENT def get_object_href(namespace_name, metadef_object): base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadef_object.name)) return base_href def create_resource(): """Metadef objects resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = MetadefObjectsController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/metadef_properties.py0000664000175000017500000004004700000000000021647 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
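# NOTE: The following is a minimal, self-contained sketch (not part of the
# upstream module) of the resource type prefix handling implemented by
# show() below: when a property is requested through a resource type whose
# association defines a prefix (e.g. 'hw_'), the prefix must be stripped
# before the internal repository lookup. The names 'hw_' and 'cpu_cores'
# used here are illustrative assumptions only.
def _example_strip_resource_type_prefix(property_name, prefix):
    """Return the internal property name for a prefixed request.

    >>> _example_strip_resource_type_prefix('hw_cpu_cores', 'hw_')
    'cpu_cores'
    """
    if prefix and property_name.startswith(prefix):
        # Drop the association prefix to recover the stored name.
        return property_name[len(prefix):]
    # Mirrors the NotFound behaviour in show() for names that do not
    # carry the expected prefix.
    raise ValueError("Property %s does not start with the expected "
                     "prefix '%s'." % (property_name, prefix))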
import http.client as http from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2 import metadef_namespaces as namespaces from glance.api.v2.model.metadef_namespace import Namespace from glance.api.v2.model.metadef_property_type import PropertyType from glance.api.v2.model.metadef_property_type import PropertyTypes from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) class NamespacePropertiesController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) def _to_dict(self, model_property_type): # Convert the model PropertyTypes dict to a JSON encoding db_property_type_dict = dict() db_property_type_dict['schema'] = json.tojson( PropertyType, model_property_type) db_property_type_dict['name'] = model_property_type.name return db_property_type_dict def _to_model(self, db_property_type): # Convert the persisted json schema to a dict of PropertyTypes property_type = json.fromjson( PropertyType, db_property_type.schema) property_type.name = db_property_type.name return property_type def index(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): This is just a "do you have permission to # list properties" check. Each property is checked against # get_metadef_property below. 
api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).get_metadef_properties() filters = dict() filters['namespace'] = namespace prop_repo = self.gateway.get_metadef_property_repo(req.context) db_properties = prop_repo.list(filters=filters) property_list = Namespace.to_model_properties(db_properties) namespace_properties = PropertyTypes() namespace_properties.properties = property_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata properties " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return namespace_properties def show(self, req, namespace, property_name, filters=None): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Metadef properties are associated with # namespace, so made provision to pass namespace here # for visibility check api_pol = api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy) api_pol.get_metadef_property() if filters and filters['resource_type']: # Verify that you can fetch resource type details api_pol.get_metadef_resource_type() rs_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type = rs_repo.get(filters['resource_type'], namespace) prefix = db_resource_type.prefix if prefix and property_name.startswith(prefix): property_name = property_name[len(prefix):] else: msg = (_("Property %(property_name)s does not start " "with the expected resource type association " "prefix of '%(prefix)s'.") % {'property_name': property_name, 'prefix': prefix}) raise exception.NotFound(msg) prop_repo = self.gateway.get_metadef_property_repo(req.context) db_property = prop_repo.get(namespace, property_name) property = self._to_model(db_property) except exception.Forbidden as e: LOG.debug("User not permitted to show metadata property '%s' " "within '%s' namespace", property_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return property def create(self, req, namespace, property_type): prop_factory = self.gateway.get_metadef_property_factory(req.context) prop_repo = self.gateway.get_metadef_property_repo(req.context) ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Metadef property is created for Metadef # namespaces. Here we are just checking if user is authorized # to create metadef property or not. 
api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).add_metadef_property() new_property_type = prop_factory.new_namespace_property( namespace=namespace, **self._to_dict(property_type)) prop_repo.add(new_property_type) except exception.Forbidden as e: LOG.debug("User not permitted to create metadata property within " "'%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.Invalid as e: msg = (_("Couldn't create metadata property: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) return self._to_model(new_property_type) def update(self, req, namespace, property_name, property_type): prop_repo = self.gateway.get_metadef_property_repo(req.context) ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Metadef property is created for Metadef # namespaces. Here we are just checking if user is authorized # to update metadef property or not. api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).modify_metadef_property() db_property_type = prop_repo.get(namespace, property_name) db_property_type._old_name = db_property_type.name db_property_type.name = property_type.name db_property_type.schema = (self._to_dict(property_type))['schema'] updated_property_type = prop_repo.save(db_property_type) except exception.Invalid as e: msg = (_("Couldn't update metadata property: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.Forbidden as e: LOG.debug("User not permitted to update metadata property '%s' " "within '%s' namespace", property_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) return self._to_model(updated_property_type) def delete(self, req, namespace, property_name): prop_repo = self.gateway.get_metadef_property_repo(req.context) ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Metadef property is created for Metadef # namespaces. Here we are just checking if user is authorized # to delete metadef property or not. 
api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).remove_metadef_property() property_type = prop_repo.get(namespace, property_name) property_type.delete() prop_repo.remove(property_type) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata property '%s' " "within '%s' namespace", property_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) property_type = json.fromjson(PropertyType, body) return dict(property_type=property_type) def update(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) property_type = json.fromjson(PropertyType, body) return dict(property_type=property_type) def show(self, request): params = request.params.copy() query_params = { 'filters': params } return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema def show(self, response, result): property_type_json = json.tojson(PropertyType, result) body = jsonutils.dumps(property_type_json, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def index(self, response, result): property_type_json = json.tojson(PropertyTypes, result) body = jsonutils.dumps(property_type_json, ensure_ascii=False) response.unicode_body = body response.content_type = 'application/json' def create(self, response, result): response.status_int = http.CREATED self.show(response, result) def update(self, response, result): response.status_int = http.OK self.show(response, result) def delete(self, response, result): response.status_int = http.NO_CONTENT def _get_base_definitions(): return { "positiveInteger": { "type": "integer", "minimum": 0 }, "positiveIntegerDefault0": { "allOf": [ {"$ref": "#/definitions/positiveInteger"}, {"default": 0} ] }, "stringArray": { "type": "array", "items": {"type": "string"}, "minItems": 1, "uniqueItems": True } } def _get_base_properties(): base_def = namespaces.get_schema_definitions() return base_def['property']['additionalProperties']['properties'] def get_schema(require_name=True): definitions = _get_base_definitions() properties = _get_base_properties() mandatory_attrs = PropertyType.get_mandatory_attrs() if require_name: # name is required attribute when use as single property type mandatory_attrs.append('name') schema = glance.schema.Schema( 'property', properties, required=mandatory_attrs, 
definitions=definitions ) return schema def get_collection_schema(): namespace_properties_schema = get_schema() # Property name is a dict key and not a required attribute in # individual property schema inside property collections namespace_properties_schema.required.remove('name') return glance.schema.DictCollectionSchema('properties', namespace_properties_schema) def create_resource(): """NamespaceProperties resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = NamespacePropertiesController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/metadef_resource_types.py0000664000175000017500000003327700000000000022535 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http from oslo_log import log as logging from oslo_serialization import jsonutils import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_resource_type import ResourceType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations from glance.api.v2.model.metadef_resource_type import ResourceTypes from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) class ResourceTypeController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) def index(self, req): try: filters = {'namespace': None} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) # NOTE(abhishekk): Here we are just checking if user is # authorized to view/list metadef resource types or not. # Also there is no relation between list_metadef_resource_types # and get_metadef_resource_type policies so can not enforce # get_metadef_resource_type policy on individual resource # type here. 
api_policy.MetadefAPIPolicy( req.context, enforcer=self.policy).list_metadef_resource_types() db_resource_type_list = rs_type_repo.list(filters=filters) resource_type_list = [ResourceType.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypes() resource_types.resource_types = resource_type_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata resource types " "index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return resource_types def show(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Here we are just checking if user is # authorized to view/list metadef resource types or not. # Each resource_type is checked against # get_metadef_resource_type below. api_policy.MetadefAPIPolicy( req.context, md_resource=namespace_obj, enforcer=self.policy).list_metadef_resource_types() filters = {'namespace': namespace} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_type_list = rs_type_repo.list(filters=filters) rs_type_list = [ ResourceTypeAssociation.to_wsme_model( rs_type ) for rs_type in db_type_list if api_policy.MetadefAPIPolicy( req.context, md_resource=rs_type.namespace, enforcer=self.policy ).check('get_metadef_resource_type')] resource_types = ResourceTypeAssociations() resource_types.resource_type_associations = rs_type_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata resource types " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) return resource_types def create(self, req, resource_type, namespace): rs_type_factory = self.gateway.get_metadef_resource_type_factory( req.context) rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) ns_repo = self.gateway.get_metadef_namespace_repo( req.context) try: namespace_obj = ns_repo.get(namespace) except (exception.Forbidden, exception.NotFound): # NOTE (abhishekk): Returning 404 Not Found as the # namespace is outside of this user's project msg = _("Namespace %s not found") % namespace raise webob.exc.HTTPNotFound(explanation=msg) try: # NOTE(abhishekk): Metadef resource type is created for Metadef # namespaces. Here we are just checking if user is authorized # to create metadef resource types or not. 
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).add_metadef_resource_type_association()

            new_resource_type = rs_type_factory.new_resource_type(
                namespace=namespace, **resource_type.to_dict())
            rs_type_repo.add(new_resource_type)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata resource type "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)

        return ResourceTypeAssociation.to_wsme_model(new_resource_type)

    def delete(self, req, namespace, resource_type):
        rs_type_repo = self.gateway.get_metadef_resource_type_repo(
            req.context)
        ns_repo = self.gateway.get_metadef_namespace_repo(
            req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Metadef resource type is created for Metadef
            # namespaces. Here we are just checking if user is authorized
            # to delete metadef resource types or not.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy
            ).remove_metadef_resource_type_association()

            filters = {}
            found = False
            filters['namespace'] = namespace
            db_resource_type_list = rs_type_repo.list(filters=filters)
            for db_resource_type in db_resource_type_list:
                if db_resource_type.name == resource_type:
                    db_resource_type.delete()
                    rs_type_repo.remove(db_resource_type)
                    found = True
            if not found:
                raise exception.NotFound()
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata resource type "
                      "'%s' within '%s' namespace", resource_type, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound:
            msg = (_("Failed to find resource type %(resourcetype)s to "
                     "delete") % {'resourcetype': resource_type})
            LOG.error(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)


class RequestDeserializer(wsgi.JSONRequestDeserializer):
    _disallowed_properties = ['created_at', 'updated_at']

    def __init__(self, schema=None):
        super(RequestDeserializer, self).__init__()
        self.schema = schema or get_schema()

    def _get_request_body(self, request):
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    @classmethod
    def _check_allowed(cls, image):
        for key in cls._disallowed_properties:
            if key in image:
                msg = _("Attribute '%s' is read-only.") % key
                raise webob.exc.HTTPForbidden(explanation=msg)

    def create(self, request):
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        resource_type = json.fromjson(ResourceTypeAssociation, body)
        return dict(resource_type=resource_type)


class ResponseSerializer(wsgi.JSONResponseSerializer):
    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema

    def show(self, response, result):
        resource_type_json = json.tojson(ResourceTypeAssociations, result)
        body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'
    def index(self, response, result):
        resource_type_json = json.tojson(ResourceTypes, result)
        body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'

    def create(self, response, result):
        resource_type_json = json.tojson(ResourceTypeAssociation, result)
        response.status_int = http.CREATED
        body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'

    def delete(self, response, result):
        response.status_int = http.NO_CONTENT


def _get_base_properties():
    return {
        'name': {
            'type': 'string',
            'description': _('Resource type names should be aligned with '
                             'Heat resource types whenever possible: '
                             'https://docs.openstack.org/heat/latest/'
                             'template_guide/openstack.html'),
            'maxLength': 80,
        },
        'prefix': {
            'type': 'string',
            'description': _('Specifies the prefix to use for the given '
                             'resource type. Any properties in the namespace '
                             'should be prefixed with this prefix when being '
                             'applied to the specified resource type. Must '
                             'include prefix separator (e.g. a colon :).'),
            'maxLength': 80,
        },
        'properties_target': {
            'type': 'string',
            'description': _('Some resource types allow more than one key / '
                             'value pair per instance. For example, Cinder '
                             'allows user and image metadata on volumes. '
                             'Only the image properties metadata is '
                             'evaluated by Nova (scheduling or drivers). '
                             'This property allows a namespace target to '
                             'remove the ambiguity.'),
            'maxLength': 80,
        },
        "created_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of resource type association"),
            "format": "date-time"
        },
        "updated_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of the last resource type "
                             "association modification"),
            "format": "date-time"
        }
    }


def get_schema():
    properties = _get_base_properties()
    mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs()
    schema = glance.schema.Schema(
        'resource_type_association',
        properties,
        required=mandatory_attrs,
    )
    return schema


def get_collection_schema():
    resource_type_schema = get_schema()
    return glance.schema.CollectionSchema('resource_type_associations',
                                          resource_type_schema)


def create_resource():
    """ResourceTypeAssociation resource factory method"""
    schema = get_schema()
    deserializer = RequestDeserializer(schema)
    serializer = ResponseSerializer(schema)
    controller = ResourceTypeController()
    return wsgi.Resource(controller, deserializer, serializer)
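
# Example (a minimal usage sketch, not part of the module): the controller
# above backs the resource-type routes wired up in glance/api/v2/router.py.
# Assuming a reachable endpoint (hypothetical host) and a valid token,
# associating a resource type with a namespace looks roughly like:
#
#     curl -X POST \
#         -H "X-Auth-Token: $TOKEN" \
#         -H "Content-Type: application/json" \
#         -d '{"name": "OS::Nova::Flavor", "prefix": "hw:"}' \
#         http://glance.example.com/v2/metadefs/namespaces/MyNS/resource_types
#
# RequestDeserializer.create() validates the body against get_schema(), and
# ResponseSerializer.create() answers 201 with the stored association.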

# ===== glance-29.0.0/glance/api/v2/metadef_tags.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import http.client as http

from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
import webob.exc
from wsme.rest import json

from glance.api import policy
from glance.api.v2.model.metadef_tag import MetadefTag
from glance.api.v2.model.metadef_tag import MetadefTags
from glance.api.v2 import policy as api_policy
from glance.common import exception
from glance.common import wsgi
from glance.common import wsme_utils
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema

LOG = logging.getLogger(__name__)


class TagsController(object):
    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 schema=None):
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.gateway = glance.gateway.Gateway(db_api=self.db_api,
                                              notifier=self.notifier,
                                              policy_enforcer=self.policy)
        self.schema = schema or get_schema()
        self.tag_schema_link = '/v2/schemas/metadefs/tag'

    def create(self, req, namespace, tag_name):
        tag_factory = self.gateway.get_metadef_tag_factory(req.context)
        tag_repo = self.gateway.get_metadef_tag_repo(req.context)
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        tag_name_as_dict = {'name': tag_name}
        try:
            self.schema.validate(tag_name_as_dict)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)

        try:
            # NOTE(abhishekk): Metadef tags are created for Metadef
            # namespaces. Here we are just checking if the user is
            # authorized to create a metadef tag or not.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).add_metadef_tag()

            new_meta_tag = tag_factory.new_tag(
                namespace=namespace,
                **tag_name_as_dict)
            tag_repo.add(new_meta_tag)
        except exception.Invalid as e:
            msg = (_("Couldn't create metadata tag: %s")
                   % encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata tag within "
                      "'%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)

        return MetadefTag.to_wsme_model(new_meta_tag)

    def create_tags(self, req, metadata_tags, namespace):
        tag_factory = self.gateway.get_metadef_tag_factory(req.context)
        tag_repo = self.gateway.get_metadef_tag_repo(req.context)
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Metadef tags are created for Metadef
            # namespaces. Here we are just checking if the user is
            # authorized to create metadef tags or not.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).add_metadef_tags()

            can_append = strutils.bool_from_string(req.headers.get(
                'X-Openstack-Append'))
            tag_list = []
            for metadata_tag in metadata_tags.tags:
                tag_list.append(tag_factory.new_tag(
                    namespace=namespace, **metadata_tag.to_dict()))
            tag_repo.add_tags(tag_list, can_append)
            tag_list_out = [MetadefTag(**{'name': db_metatag.name})
                            for db_metatag in tag_list]
            metadef_tags = MetadefTags()
            metadef_tags.tags = tag_list_out
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata tags within "
                      "'%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)

        return metadef_tags

    def index(self, req, namespace, marker=None, limit=None,
              sort_key='created_at', sort_dir='desc', filters=None):
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): This is just a "do you have permission to
            # list tags" check. Each object is checked against
            # get_metadef_tag below.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).get_metadef_tags()

            filters = filters or dict()
            filters['namespace'] = namespace

            tag_repo = self.gateway.get_metadef_tag_repo(req.context)

            if marker:
                metadef_tag = tag_repo.get(namespace, marker)
                marker = metadef_tag.tag_id

            db_metatag_list = tag_repo.list(
                marker=marker, limit=limit, sort_key=sort_key,
                sort_dir=sort_dir, filters=filters)

            tag_list = [MetadefTag(**{'name': db_metatag.name})
                        for db_metatag in db_metatag_list]

            metadef_tags = MetadefTags()
            metadef_tags.tags = tag_list
        except exception.Forbidden as e:
            LOG.debug("User not permitted to retrieve metadata tags "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

        return metadef_tags

    def show(self, req, namespace, tag_name):
        meta_tag_repo = self.gateway.get_metadef_tag_repo(req.context)
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Metadef tags are associated with
            # namespace, so made provision to pass namespace here
            # for visibility check
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).get_metadef_tag()

            metadef_tag = meta_tag_repo.get(namespace, tag_name)
            return MetadefTag.to_wsme_model(metadef_tag)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to show metadata tag '%s' "
                      "within '%s' namespace", tag_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

    def update(self, req, metadata_tag, namespace, tag_name):
        meta_repo = self.gateway.get_metadef_tag_repo(req.context)
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Metadef tags are created for Metadef
            # namespaces. Here we are just checking if the user is
            # authorized to update a metadef tag or not.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).modify_metadef_tag()

            metadef_tag = meta_repo.get(namespace, tag_name)
            metadef_tag._old_name = metadef_tag.name
            metadef_tag.name = wsme_utils._get_value(
                metadata_tag.name)
            updated_metadata_tag = meta_repo.save(metadef_tag)
        except exception.Invalid as e:
            msg = (_("Couldn't update metadata tag: %s")
                   % encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to update metadata tag '%s' "
                      "within '%s' namespace", tag_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)

        return MetadefTag.to_wsme_model(updated_metadata_tag)

    def delete(self, req, namespace, tag_name):
        meta_repo = self.gateway.get_metadef_tag_repo(req.context)
        ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
        try:
            namespace_obj = ns_repo.get(namespace)
        except (exception.Forbidden, exception.NotFound):
            # NOTE (abhishekk): Returning 404 Not Found as the
            # namespace is outside of this user's project
            msg = _("Namespace %s not found") % namespace
            raise webob.exc.HTTPNotFound(explanation=msg)

        try:
            # NOTE(abhishekk): Metadef tags are created for Metadef
            # namespaces. Here we are just checking if the user is
            # authorized to delete a metadef tag or not.
            api_policy.MetadefAPIPolicy(
                req.context,
                md_resource=namespace_obj,
                enforcer=self.policy).delete_metadef_tag()

            metadef_tag = meta_repo.get(namespace, tag_name)
            metadef_tag.delete()
            meta_repo.remove(metadef_tag)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata tag '%s' "
                      "within '%s' namespace", tag_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)


def _get_base_definitions():
    return None


def _get_base_properties():
    return {
        "name": {
            "type": "string",
            "maxLength": 80
        },
        "created_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of tag creation"),
            "format": "date-time"
        },
        "updated_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of the last tag modification"),
            "format": "date-time"
        }
    }


def _get_base_properties_for_list():
    return {
        "tags": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string"
                    }
                },
                'required': ['name'],
                "additionalProperties": False
            }
        },
    }


def get_schema():
    definitions = _get_base_definitions()
    properties = _get_base_properties()
    mandatory_attrs = MetadefTag.get_mandatory_attrs()
    schema = glance.schema.Schema(
        'tag',
        properties,
        required=mandatory_attrs,
        definitions=definitions,
    )
    return schema


def get_schema_for_list():
    definitions = _get_base_definitions()
    properties = _get_base_properties_for_list()
    schema = glance.schema.Schema(
        'tags',
        properties,
        required=None,
        definitions=definitions,
    )
    return schema


def get_collection_schema():
    tag_schema = get_schema()
    return glance.schema.CollectionSchema('tags', tag_schema)


class RequestDeserializer(wsgi.JSONRequestDeserializer):
    _disallowed_properties = ['created_at', 'updated_at']

    def __init__(self, schema=None):
        super(RequestDeserializer, self).__init__()
        self.schema = schema or get_schema()
        self.schema_for_list = get_schema_for_list()

    def _get_request_body(self, request):
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    def _validate_sort_dir(self, sort_dir):
        if sort_dir not in ['asc', 'desc']:
            msg = _('Invalid sort direction: %s') % sort_dir
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return sort_dir

    def _get_filters(self, filters):
        visibility = filters.get('visibility')
        if visibility:
            if visibility not in ['public', 'private', 'shared']:
                msg = _('Invalid visibility value: %s') % visibility
                raise webob.exc.HTTPBadRequest(explanation=msg)
        return filters

    def _validate_limit(self, limit):
        try:
            limit = int(limit)
        except ValueError:
            msg = _("limit param must be an integer")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if limit < 0:
            msg = _("limit param must be positive")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return limit

    def update(self, request):
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        metadata_tag = json.fromjson(MetadefTag, body)
        return dict(metadata_tag=metadata_tag)

    def index(self, request):
        params = request.params.copy()
        limit = params.pop('limit', None)
        marker = params.pop('marker', None)
        sort_dir = params.pop('sort_dir', 'desc')
        query_params = {
            'sort_key': params.pop('sort_key', 'created_at'),
            'sort_dir': self._validate_sort_dir(sort_dir),
            'filters': self._get_filters(params)
        }
        if marker:
            query_params['marker'] = marker

        if limit:
            query_params['limit'] = self._validate_limit(limit)
        return query_params

    def create_tags(self, request):
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema_for_list.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        metadata_tags = json.fromjson(MetadefTags, body)
        return dict(metadata_tags=metadata_tags)

    @classmethod
    def _check_allowed(cls, image):
        for key in cls._disallowed_properties:
            if key in image:
                msg = _("Attribute '%s' is read-only.") % key
                raise webob.exc.HTTPForbidden(explanation=msg)


class ResponseSerializer(wsgi.JSONResponseSerializer):
    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def create(self, response, metadata_tag):
        response.status_int = http.CREATED
        self.show(response, metadata_tag)

    def create_tags(self, response, result):
        response.status_int = http.CREATED
        metadata_tags_json = json.tojson(MetadefTags, result)
        body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'

    def show(self, response, metadata_tag):
        metadata_tag_json = json.tojson(MetadefTag, metadata_tag)
        body = jsonutils.dumps(metadata_tag_json, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'

    def update(self, response, metadata_tag):
        response.status_int = http.OK
        self.show(response, metadata_tag)

    def index(self, response, result):
        metadata_tags_json = json.tojson(MetadefTags, result)
        body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False)
        response.unicode_body = body
        response.content_type = 'application/json'

    def delete(self, response, result):
        response.status_int = http.NO_CONTENT


def get_tag_href(namespace_name, metadef_tag):
    base_href = ('/v2/metadefs/namespaces/%s/tags/%s' %
                 (namespace_name, metadef_tag.name))
    return base_href


def create_resource():
    """Metadef tags resource factory method"""
    schema = get_schema()
    deserializer = RequestDeserializer(schema)
    serializer = ResponseSerializer(schema)
    controller = TagsController()
    return wsgi.Resource(controller, deserializer, serializer)
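
# Example (a minimal usage sketch, not part of the module): the
# TagsController above backs the tag routes wired up in
# glance/api/v2/router.py. A single tag is created with an empty-bodied
# POST, while multiple tags are posted as a list (illustrative values):
#
#     POST /v2/metadefs/namespaces/MyNS/tags/high-iops
#
#     POST /v2/metadefs/namespaces/MyNS/tags
#     {"tags": [{"name": "high-iops"}, {"name": "low-latency"}]}
#
# The bulk form honors an X-Openstack-Append header: when it evaluates
# true, create_tags() appends to the namespace's existing tags instead of
# replacing them.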

# ===== glance-29.0.0/glance/api/v2/model/__init__.py (empty file) =====

# ===== glance-29.0.0/glance/api/v2/model/metadef_namespace.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import wsme
from wsme.rest import json
from wsme import types

from glance.api.v2.model.metadef_object import MetadefObject
from glance.api.v2.model.metadef_property_type import PropertyType
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation
from glance.api.v2.model.metadef_tag import MetadefTag
from glance.common.wsme_utils import WSMEModelTransformer


class Namespace(types.Base, WSMEModelTransformer):

    # Base fields
    namespace = wsme.wsattr(types.text, mandatory=True)
    display_name = wsme.wsattr(types.text, mandatory=False)
    description = wsme.wsattr(types.text, mandatory=False)
    visibility = wsme.wsattr(types.text, mandatory=False)
    protected = wsme.wsattr(bool, mandatory=False)
    owner = wsme.wsattr(types.text, mandatory=False)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    # Contained fields
    resource_type_associations = wsme.wsattr([ResourceTypeAssociation],
                                             mandatory=False)
    properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)
    objects = wsme.wsattr([MetadefObject], mandatory=False)
    tags = wsme.wsattr([MetadefTag], mandatory=False)

    # Generated fields
    self = wsme.wsattr(types.text, mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=False)

    def __init__(cls, **kwargs):
        super(Namespace, cls).__init__(**kwargs)

    @staticmethod
    def to_model_properties(db_property_types):
        property_types = {}
        for db_property_type in db_property_types:
            # Convert the persisted json schema to a dict of PropertyTypes
            property_type = json.fromjson(
                PropertyType, db_property_type.schema)
            property_type_name = db_property_type.name
            property_types[property_type_name] = property_type

        return property_types


class Namespaces(types.Base, WSMEModelTransformer):

    namespaces = wsme.wsattr([Namespace], mandatory=False)

    # Pagination
    next = wsme.wsattr(types.text, mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=True)
    first = wsme.wsattr(types.text, mandatory=True)

    def __init__(self, **kwargs):
        super(Namespaces, self).__init__(**kwargs)
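
# Example (a minimal sketch, not part of the module): Namespace is a WSME
# transfer object, so a response body can be produced by constructing one
# and serializing it with wsme.rest.json; the values here are illustrative
# only:
#
#     from wsme.rest import json as wsme_json
#
#     ns = Namespace(namespace='OS::Compute::Hypervisor',
#                    display_name='Hypervisor Selection',
#                    visibility='public',
#                    protected=True)
#     body = wsme_json.tojson(Namespace, ns)
#     # body -> {'namespace': 'OS::Compute::Hypervisor', ...}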

# ===== glance-29.0.0/glance/api/v2/model/metadef_object.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import wsme
from wsme import types

from glance.api.v2.model.metadef_property_type import PropertyType
from glance.common.wsme_utils import WSMEModelTransformer


class MetadefObject(types.Base, WSMEModelTransformer):
    name = wsme.wsattr(types.text, mandatory=True)
    required = wsme.wsattr([types.text], mandatory=False)
    description = wsme.wsattr(types.text, mandatory=False)
    properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    # Generated fields
    self = wsme.wsattr(types.text, mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=False)

    def __init__(cls, **kwargs):
        super(MetadefObject, cls).__init__(**kwargs)


class MetadefObjects(types.Base, WSMEModelTransformer):
    objects = wsme.wsattr([MetadefObject], mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=True)

    def __init__(self, **kwargs):
        super(MetadefObjects, self).__init__(**kwargs)


# ===== glance-29.0.0/glance/api/v2/model/metadef_property_item_type.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import wsme
from wsme import types


class ItemType(types.Base):
    type = wsme.wsattr(types.text, mandatory=True)
    enum = wsme.wsattr([types.text], mandatory=False)

    _wsme_attr_order = ('type', 'enum')

    def __init__(self, **kwargs):
        super(ItemType, self).__init__(**kwargs)


# ===== glance-29.0.0/glance/api/v2/model/metadef_property_type.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import wsme
from wsme import types

from glance.api.v2.model.metadef_property_item_type import ItemType
from glance.common.wsme_utils import WSMEModelTransformer


class PropertyType(types.Base, WSMEModelTransformer):
    # When used in collection of PropertyTypes, name is a dictionary key
    # and not included as separate field.
    name = wsme.wsattr(types.text, mandatory=False)

    type = wsme.wsattr(types.text, mandatory=True)
    title = wsme.wsattr(types.text, mandatory=True)
    description = wsme.wsattr(types.text, mandatory=False)
    operators = wsme.wsattr([types.text], mandatory=False)
    default = wsme.wsattr(types.bytes, mandatory=False)
    readonly = wsme.wsattr(bool, mandatory=False)

    # fields for type = integer, number
    minimum = wsme.wsattr(int, mandatory=False)
    maximum = wsme.wsattr(int, mandatory=False)

    # enum applies to any type
    enum = wsme.wsattr([types.text], mandatory=False)

    # fields for type = string
    pattern = wsme.wsattr(types.text, mandatory=False)
    minLength = wsme.wsattr(int, mandatory=False)
    maxLength = wsme.wsattr(int, mandatory=False)
    confidential = wsme.wsattr(bool, mandatory=False)

    # fields for type = array
    items = wsme.wsattr(ItemType, mandatory=False)
    uniqueItems = wsme.wsattr(bool, mandatory=False)
    minItems = wsme.wsattr(int, mandatory=False)
    maxItems = wsme.wsattr(int, mandatory=False)
    additionalItems = wsme.wsattr(bool, mandatory=False)

    def __init__(self, **kwargs):
        super(PropertyType, self).__init__(**kwargs)


class PropertyTypes(types.Base, WSMEModelTransformer):
    properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)

    def __init__(self, **kwargs):
        super(PropertyTypes, self).__init__(**kwargs)
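
# Example (a minimal sketch, not part of the module): a PropertyType mirrors
# a JSON schema property definition. The namespace properties API persists
# entries whose JSON form looks roughly like this (illustrative values):
#
#     {
#         "hw_cpu_cores": {
#             "type": "integer",
#             "title": "vCPU Cores",
#             "description": "Preferred number of cores",
#             "minimum": 1,
#             "maximum": 128
#         }
#     }
#
# Namespace.to_model_properties() in metadef_namespace.py converts such
# persisted schemas back into PropertyType instances via json.fromjson().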

# ===== glance-29.0.0/glance/api/v2/model/metadef_resource_type.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import wsme
from wsme import types

from glance.common.wsme_utils import WSMEModelTransformer


class ResourceTypeAssociation(types.Base, WSMEModelTransformer):
    name = wsme.wsattr(types.text, mandatory=True)
    prefix = wsme.wsattr(types.text, mandatory=False)
    properties_target = wsme.wsattr(types.text, mandatory=False)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceTypeAssociation, self).__init__(**kwargs)


class ResourceTypeAssociations(types.Base, WSMEModelTransformer):

    resource_type_associations = wsme.wsattr([ResourceTypeAssociation],
                                             mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceTypeAssociations, self).__init__(**kwargs)


class ResourceType(types.Base, WSMEModelTransformer):
    name = wsme.wsattr(types.text, mandatory=True)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceType, self).__init__(**kwargs)


class ResourceTypes(types.Base, WSMEModelTransformer):

    resource_types = wsme.wsattr([ResourceType], mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceTypes, self).__init__(**kwargs)


# ===== glance-29.0.0/glance/api/v2/model/metadef_tag.py =====
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import wsme
from wsme import types

from glance.common import wsme_utils


class MetadefTag(types.Base, wsme_utils.WSMEModelTransformer):

    name = wsme.wsattr(types.text, mandatory=True)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)


class MetadefTags(types.Base, wsme_utils.WSMEModelTransformer):

    tags = wsme.wsattr([MetadefTag], mandatory=False)


# ===== glance-29.0.0/glance/api/v2/policy.py =====
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_log import log as logging
import webob.exc

from glance.api import policy
from glance.common import exception
from glance.i18n import _

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


# TODO(danms): Remove this once secure RBAC is fully implemented and
# used instead of legacy policy checks.
def check_is_image_mutable(context, image):
    """Replicate the DB-layer admin-or-owner check for the API.

    Much of the API code depends on hard-coded admin-or-owner
    enforcement in the DB or authorization layer, as the policy layer
    is largely a no-op by default. During blueprint policy-refactor,
    we are trying to remove as much of that as possible, but in
    certain places we need to do that (if secure_rbac is not
    enabled). This transitional helper provides a way to do that
    enforcement where necessary.

    :param context: A RequestContext
    :param image: An ImageProxy
    :raises: exception.Forbidden if the context is not the owner or an admin
    """

    # Is admin == image mutable
    if context.is_admin:
        return

    # No owner == image not mutable
    # Image only mutable by its owner
    if (image.owner is None or context.owner is None
            or image.owner != context.owner):
        raise exception.Forbidden(_('You do not own this image'))


def check_admin_or_same_owner(context, properties):
    """Check that legacy behavior on create with owner is preserved.

    Legacy behavior requires a static check that owner is not
    inconsistent with the context, unless the caller is an
    admin. Enforce that here, if needed.

    :param context: A RequestContext
    :param properties: The properties being used to create the image,
                       which may contain an owner
    :raises: exception.Forbidden if the context is not an admin and owner
             is set to something other than the context's project
    """
    if context.is_admin:
        return

    if context.project_id != properties.get('owner', context.project_id):
        msg = _("You are not permitted to create images "
                "owned by '%s'.")
        raise exception.Forbidden(msg % properties['owner'])


class APIPolicyBase(object):
    def __init__(self, context, target=None, enforcer=None):
        self._context = context
        self._target = target or {}
        self.enforcer = enforcer or policy.Enforcer()

    def _enforce(self, rule_name):
        try:
            self.enforcer.enforce(self._context, rule_name, self._target)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=str(e))

    def check(self, name, *args):
        """Perform a soft check of a named policy.

        This is used when you need to check if a policy is allowed for the
        given resource, without needing to catch an exception. If the policy
        check requires args, those are accepted here as well.

        :param name: Policy name to check
        :returns: bool indicating if the policy is allowed.
""" try: getattr(self, name)(*args) return True except webob.exc.HTTPForbidden: return False class CacheImageAPIPolicy(APIPolicyBase): def __init__(self, context, image=None, policy_str=None, target=None, enforcer=None): self._context = context target = {} self._image = image if self._image: target = policy.ImageTarget(self._image) self._target = target self.enforcer = enforcer or policy.Enforcer() self.policy_str = policy_str super(CacheImageAPIPolicy, self).__init__(context, target, enforcer) def manage_image_cache(self): self._enforce(self.policy_str) class DiscoveryAPIPolicy(APIPolicyBase): def __init__(self, context, target=None, enforcer=None): self._context = context self._target = target or {} self.enforcer = enforcer or policy.Enforcer() super(DiscoveryAPIPolicy, self).__init__(context, target, enforcer) def stores_info_detail(self): self._enforce('stores_info_detail') class ImageAPIPolicy(APIPolicyBase): def __init__(self, context, image, enforcer=None): """Image API policy module. :param context: The RequestContext :param image: The ImageProxy object in question, or a dict of image properties if no image is yet created or needed for authorization context. :param enforcer: The policy.Enforcer object to use for enforcement operations. If not provided (or None), the default enforcer will be selected. """ self._image = image if not self.is_created: # NOTE(danms): If we are being called with a dict of image # properties then we are testing policies that involve # creating an image or other image-related resources but # without a specific image for context. The target is a # dict of proposed image properties, similar to the # dict-like interface the ImageTarget provides over # a real Image object, with specific keys. target = {'project_id': image.get('owner', context.project_id), 'owner': image.get('owner', context.project_id), 'visibility': image.get('visibility', 'private')} else: target = policy.ImageTarget(image) super(ImageAPIPolicy, self).__init__(context, target, enforcer) @property def is_created(self): """Signal whether the image actually exists or not. False if the image is only being proposed by a create operation, True if it has already been created. """ return not isinstance(self._image, dict) def _enforce(self, rule_name): """Translate Forbidden->NotFound for images.""" try: super(ImageAPIPolicy, self)._enforce(rule_name) except webob.exc.HTTPForbidden: # If we are checking image policy before creating an # image, or without a specific image for context, then we # do not need to potentially hide the presence of anything # based on visibility, so re-raise immediately. if not self.is_created: raise # If we are checking get_image, then Forbidden means the # user cannot see this image, so raise NotFound. If we are # checking anything else and get Forbidden, then raise # NotFound in that case as well to avoid exposing images # the user can not see, while preserving the Forbidden # behavior for the ones they can see. if rule_name == 'get_image' or not self.check('get_image'): raise webob.exc.HTTPNotFound() raise def check(self, name, *args): try: return super(ImageAPIPolicy, self).check(name, *args) except webob.exc.HTTPNotFound: # NOTE(danms): Since our _enforce can raise NotFound, that # too means a False check response. return False def _enforce_visibility(self, visibility): # NOTE(danms): Use the existing enforcement routine for now, # which shows that we're enforcing the same behavior. In the # future, that should probably be moved here. 
        try:
            policy._enforce_image_visibility(self.enforcer, self._context,
                                             visibility, self._target)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=str(e))

    def update_property(self, name, value=None):
        if name == 'visibility':
            # NOTE(danms): Visibility changes have their own policy,
            # so check that first, followed by the general
            # modify_image policy below.
            self._enforce_visibility(value)
        self.modify_image()

    def update_locations(self):
        self._enforce('set_image_location')

    def delete_locations(self):
        self._enforce('delete_image_location')
        # TODO(danms): Remove this legacy fallback when secure RBAC
        # replaces the legacy policy.
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_is_image_mutable(self._context, self._image)

    def get_image_location(self):
        self._enforce('get_image_location')

    def add_location(self):
        self._enforce('add_image_location')

    def get_locations(self):
        self._enforce('fetch_image_location')

    def add_image(self):
        try:
            self._enforce('add_image')
        except webob.exc.HTTPForbidden:
            # NOTE(danms): If we fail add_image because the owner is
            # different, alter the message to be informative and
            # in-line with the current message users have been getting
            # in the past.
            if self._target['owner'] != self._context.project_id:
                msg = _("You are not permitted to create images "
                        "owned by '%s'") % self._target['owner']
                raise webob.exc.HTTPForbidden(msg)
            else:
                raise
        if 'visibility' in self._target:
            self._enforce_visibility(self._target['visibility'])
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_admin_or_same_owner(self._context, self._target)

    def get_image(self):
        self._enforce('get_image')

    def get_images(self):
        self._enforce('get_images')

    def delete_image(self):
        self._enforce('delete_image')
        # TODO(danms): Remove this legacy fallback when secure RBAC
        # replaces the legacy policy.
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_is_image_mutable(self._context, self._image)

    def upload_image(self):
        self._enforce('upload_image')
        # TODO(danms): Remove this legacy fallback when secure RBAC
        # replaces the legacy policy.
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_is_image_mutable(self._context, self._image)

    def download_image(self):
        self._enforce('download_image')

    def modify_image(self):
        self._enforce('modify_image')
        # TODO(danms): Remove this legacy fallback when secure RBAC
        # replaces the legacy policy.
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_is_image_mutable(self._context, self._image)

    def deactivate_image(self):
        self._enforce('deactivate')
        # TODO(danms): Remove this legacy fallback when secure RBAC
        # replaces the legacy policy.
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_is_image_mutable(self._context, self._image)

    def reactivate_image(self):
        self._enforce('reactivate')
        # TODO(danms): Remove this legacy fallback when secure RBAC
        # replaces the legacy policy.
        if not (CONF.oslo_policy.enforce_new_defaults or
                CONF.oslo_policy.enforce_scope):
            check_is_image_mutable(self._context, self._image)

    def copy_image(self):
        self._enforce('copy_image')


class MetadefAPIPolicy(APIPolicyBase):
    def __init__(self, context, md_resource=None, target=None, enforcer=None):
        self._context = context
        self._md_resource = md_resource
        if not target:
            self._target = self._build_target()
        else:
            self._target = target
        self.enforcer = enforcer or policy.Enforcer()
        super(MetadefAPIPolicy, self).__init__(context, target=self._target,
                                               enforcer=self.enforcer)

    def _build_target(self):
        target = {
            "project_id": self._context.project_id
        }
        if self._md_resource:
            target['project_id'] = self._md_resource.owner
            target['visibility'] = self._md_resource.visibility

        return target

    def _enforce(self, rule_name):
        """Translate Forbidden->NotFound for metadef namespaces."""
        try:
            super(MetadefAPIPolicy, self)._enforce(rule_name)
        except webob.exc.HTTPForbidden:
            # If we are checking get_metadef_namespace, then Forbidden means
            # the user cannot see this namespace, so raise NotFound. If we are
            # checking anything else and get Forbidden, then raise
            # NotFound in that case as well to avoid exposing namespaces
            # the user can not see, while preserving the Forbidden
            # behavior for the ones they can see.
            if rule_name == 'get_metadef_namespace' or not self.check(
                    'get_metadef_namespace'):
                raise webob.exc.HTTPNotFound()
            raise

    def check(self, name, *args):
        try:
            return super(MetadefAPIPolicy, self).check(name, *args)
        except webob.exc.HTTPNotFound:
            # NOTE(danms): Since our _enforce can raise NotFound, that
            # too means a False check response.
            return False

    def get_metadef_namespace(self):
        self._enforce('get_metadef_namespace')

    def get_metadef_namespaces(self):
        self._enforce('get_metadef_namespaces')

    def add_metadef_namespace(self):
        self._enforce('add_metadef_namespace')

    def modify_metadef_namespace(self):
        self._enforce('modify_metadef_namespace')

    def delete_metadef_namespace(self):
        self._enforce('delete_metadef_namespace')

    def get_metadef_objects(self):
        self._enforce('get_metadef_objects')

    def add_metadef_object(self):
        self._enforce('add_metadef_object')

    def get_metadef_object(self):
        self._enforce('get_metadef_object')

    def modify_metadef_object(self):
        self._enforce('modify_metadef_object')

    def delete_metadef_object(self):
        self._enforce('delete_metadef_object')

    def add_metadef_tag(self):
        self._enforce('add_metadef_tag')

    def get_metadef_tags(self):
        self._enforce('get_metadef_tags')

    def add_metadef_tags(self):
        self._enforce('add_metadef_tags')

    def get_metadef_tag(self):
        self._enforce('get_metadef_tag')

    def modify_metadef_tag(self):
        self._enforce('modify_metadef_tag')

    def delete_metadef_tag(self):
        self._enforce('delete_metadef_tag')

    def delete_metadef_tags(self):
        self._enforce('delete_metadef_tags')

    def add_metadef_property(self):
        self._enforce('add_metadef_property')

    def get_metadef_properties(self):
        self._enforce('get_metadef_properties')

    def remove_metadef_property(self):
        self._enforce('remove_metadef_property')

    def get_metadef_property(self):
        self._enforce('get_metadef_property')

    def modify_metadef_property(self):
        self._enforce('modify_metadef_property')

    def add_metadef_resource_type_association(self):
        self._enforce('add_metadef_resource_type_association')

    def list_metadef_resource_types(self):
        self._enforce('list_metadef_resource_types')

    def get_metadef_resource_type(self):
        self._enforce('get_metadef_resource_type')

    def remove_metadef_resource_type_association(self):
        self._enforce('remove_metadef_resource_type_association')
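
# Example (a minimal usage sketch, not part of the module): callers use the
# named hard checks to gate a whole operation, and APIPolicyBase.check() as
# a soft check when filtering a list; illustrative only:
#
#     policy_obj = MetadefAPIPolicy(req.context, md_resource=namespace_obj)
#     policy_obj.get_metadef_tags()        # raises HTTPForbidden/HTTPNotFound
#
#     visible = [rt for rt in candidates
#                if MetadefAPIPolicy(req.context, md_resource=rt.namespace)
#                .check('get_metadef_resource_type')]
#
# This mirrors how ResourceTypeController.show() filters associations in
# glance/api/v2/metadef_resource_types.py.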
class MemberAPIPolicy(APIPolicyBase):
    def __init__(self, context, image, target=None, enforcer=None):
        self._context = context
        self._image = image
        if not target:
            self._target = self._build_target()
        else:
            # Keep an explicitly-passed target, so the attribute is always
            # set before it is handed to the base class below.
            self._target = target

        self.enforcer = enforcer or policy.Enforcer()
        super(MemberAPIPolicy, self).__init__(context,
                                              target=self._target,
                                              enforcer=self.enforcer)

    def _build_target(self):
        target = {
            "project_id": self._context.project_id
        }
        if self._image:
            target = policy.ImageTarget(self._image)

        return target

    def _enforce(self, rule_name):
        ImageAPIPolicy(self._context, self._image,
                       enforcer=self.enforcer).get_image()
        super(MemberAPIPolicy, self)._enforce(rule_name)

    def get_members(self):
        self._enforce("get_members")

    def get_member(self):
        self._enforce("get_member")

    def delete_member(self):
        self._enforce("delete_member")

    def modify_member(self):
        self._enforce("modify_member")

    def add_member(self):
        self._enforce("add_member")


class TasksAPIPolicy(APIPolicyBase):
    def __init__(self, context, target=None, enforcer=None):
        self._context = context
        self._target = target or {}
        self.enforcer = enforcer or policy.Enforcer()
        super(TasksAPIPolicy, self).__init__(context, target=self._target,
                                             enforcer=self.enforcer)

    def tasks_api_access(self):
        self._enforce('tasks_api_access')


# ===== glance-29.0.0/glance/api/v2/router.py =====
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
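
# Example (a minimal sketch, not part of the module): the API class below is
# a routes-based wsgi.Router. Each mapper.connect() call binds one URL
# template and HTTP method to a controller action, and a companion
# reject_method_resource entry turns any other method on that path into a
# 405 response advertising the allowed methods. Schematically (hypothetical
# route, illustrative only):
#
#     mapper.connect('/widgets/{widget_id}',
#                    controller=widget_resource,
#                    action='show',
#                    conditions={'method': ['GET']})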
from glance.api.v2 import cached_images
from glance.api.v2 import discovery
from glance.api.v2 import image_actions
from glance.api.v2 import image_data
from glance.api.v2 import image_members
from glance.api.v2 import image_tags
from glance.api.v2 import images
from glance.api.v2 import metadef_namespaces
from glance.api.v2 import metadef_objects
from glance.api.v2 import metadef_properties
from glance.api.v2 import metadef_resource_types
from glance.api.v2 import metadef_tags
from glance.api.v2 import schemas
from glance.api.v2 import tasks
from glance.common import wsgi


class API(wsgi.Router):

    """WSGI router for Glance v2 API requests."""

    def __init__(self, mapper):
        custom_image_properties = images.load_custom_properties()
        reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())

        schemas_resource = schemas.create_resource(custom_image_properties)
        mapper.connect('/schemas/image',
                       controller=schemas_resource,
                       action='image',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/image',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/images',
                       controller=schemas_resource,
                       action='images',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/images',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/member',
                       controller=schemas_resource,
                       action='member',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/member',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/members',
                       controller=schemas_resource,
                       action='members',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/members',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/task',
                       controller=schemas_resource,
                       action='task',
                       conditions={'method': ['GET']})
        mapper.connect('/schemas/task',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/tasks',
                       controller=schemas_resource,
                       action='tasks',
                       conditions={'method': ['GET']})
        mapper.connect('/schemas/tasks',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/namespace',
                       controller=schemas_resource,
                       action='metadef_namespace',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/namespace',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/namespaces',
                       controller=schemas_resource,
                       action='metadef_namespaces',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/namespaces',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/resource_type',
                       controller=schemas_resource,
                       action='metadef_resource_type',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/resource_type',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/resource_types',
                       controller=schemas_resource,
                       action='metadef_resource_types',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/resource_types',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/property',
                       controller=schemas_resource,
                       action='metadef_property',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/property',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/properties',
                       controller=schemas_resource,
                       action='metadef_properties',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/properties',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/object',
                       controller=schemas_resource,
                       action='metadef_object',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/object',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/objects',
                       controller=schemas_resource,
                       action='metadef_objects',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/objects',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/tag',
                       controller=schemas_resource,
                       action='metadef_tag',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/tag',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/schemas/metadefs/tags',
                       controller=schemas_resource,
                       action='metadef_tags',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/schemas/metadefs/tags',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')

        # Metadef resource types
        metadef_resource_types_resource = (
            metadef_resource_types.create_resource())
        mapper.connect('/metadefs/resource_types',
                       controller=metadef_resource_types_resource,
                       action='index',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/metadefs/resource_types',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
                       controller=metadef_resource_types_resource,
                       action='show',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
                       controller=metadef_resource_types_resource,
                       action='create',
                       conditions={'method': ['POST']})
        mapper.connect('/metadefs/namespaces/{namespace}/resource_types',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, POST')
        mapper.connect('/metadefs/namespaces/{namespace}/resource_types/'
                       '{resource_type}',
                       controller=metadef_resource_types_resource,
                       action='delete',
                       conditions={'method': ['DELETE']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}/resource_types/'
                       '{resource_type}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='DELETE')

        # Metadef Namespaces
        metadef_namespace_resource = metadef_namespaces.create_resource()
        mapper.connect('/metadefs/namespaces',
                       controller=metadef_namespace_resource,
                       action='index',
                       conditions={'method': ['GET']})
        mapper.connect('/metadefs/namespaces',
                       controller=metadef_namespace_resource,
                       action='create',
                       conditions={'method': ['POST']})
        mapper.connect('/metadefs/namespaces',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, POST')
        mapper.connect('/metadefs/namespaces/{namespace}',
                       controller=metadef_namespace_resource,
                       action='show',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}',
                       controller=metadef_namespace_resource,
                       action='update',
                       conditions={'method': ['PUT']})
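        # NOTE: a usage sketch (illustrative only) -- the namespace routes
        # above and below combine into the usual CRUD surface:
        #
        #     GET    /v2/metadefs/namespaces
        #     POST   /v2/metadefs/namespaces
        #     GET    /v2/metadefs/namespaces/{namespace}
        #     PUT    /v2/metadefs/namespaces/{namespace}
        #     DELETE /v2/metadefs/namespaces/{namespace}
        #
        # Any other verb falls through to reject_method_resource, which
        # answers 405 with the allowed methods listed.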
        mapper.connect('/metadefs/namespaces/{namespace}',
                       controller=metadef_namespace_resource,
                       action='delete',
                       conditions={'method': ['DELETE']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PUT, DELETE')

        # Metadef namespace properties
        metadef_properties_resource = metadef_properties.create_resource()
        mapper.connect('/metadefs/namespaces/{namespace}/properties',
                       controller=metadef_properties_resource,
                       action='index',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}/properties',
                       controller=metadef_properties_resource,
                       action='create',
                       conditions={'method': ['POST']})
        mapper.connect('/metadefs/namespaces/{namespace}/properties',
                       controller=metadef_namespace_resource,
                       action='delete_properties',
                       conditions={'method': ['DELETE']})
        mapper.connect('/metadefs/namespaces/{namespace}/properties',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, POST, DELETE')
        mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
                       'property_name}',
                       controller=metadef_properties_resource,
                       action='show',
                       conditions={'method': ['GET']})
        mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
                       'property_name}',
                       controller=metadef_properties_resource,
                       action='update',
                       conditions={'method': ['PUT']})
        mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
                       'property_name}',
                       controller=metadef_properties_resource,
                       action='delete',
                       conditions={'method': ['DELETE']})
        mapper.connect('/metadefs/namespaces/{namespace}/properties/{'
                       'property_name}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PUT, DELETE')

        # Metadef objects
        metadef_objects_resource = metadef_objects.create_resource()
        mapper.connect('/metadefs/namespaces/{namespace}/objects',
                       controller=metadef_objects_resource,
                       action='index',
                       conditions={'method': ['GET']})
        mapper.connect('/metadefs/namespaces/{namespace}/objects',
                       controller=metadef_objects_resource,
                       action='create',
                       conditions={'method': ['POST']})
        mapper.connect('/metadefs/namespaces/{namespace}/objects',
                       controller=metadef_namespace_resource,
                       action='delete_objects',
                       conditions={'method': ['DELETE']})
        mapper.connect('/metadefs/namespaces/{namespace}/objects',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, POST, DELETE')
        mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
                       'object_name}',
                       controller=metadef_objects_resource,
                       action='show',
                       conditions={'method': ['GET']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
                       'object_name}',
                       controller=metadef_objects_resource,
                       action='update',
                       conditions={'method': ['PUT']})
        mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
                       'object_name}',
                       controller=metadef_objects_resource,
                       action='delete',
                       conditions={'method': ['DELETE']},
                       body_reject=True)
        mapper.connect('/metadefs/namespaces/{namespace}/objects/{'
                       'object_name}',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET, PUT, DELETE')

        # Metadef tags
        metadef_tags_resource = metadef_tags.create_resource()
        mapper.connect('/metadefs/namespaces/{namespace}/tags',
                       controller=metadef_tags_resource,
                       action='index',
                       conditions={'method': ['GET']})
        mapper.connect('/metadefs/namespaces/{namespace}/tags',
                       controller=metadef_tags_resource,
                       action='create_tags',
                       conditions={'method': ['POST']})
        mapper.connect('/metadefs/namespaces/{namespace}/tags',
                       controller=metadef_namespace_resource,
                       action='delete_tags',
                       conditions={'method': ['DELETE']})
action='reject', allowed_methods='GET, POST, DELETE') mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='create', conditions={'method': ['POST']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=reject_method_resource, action='reject', allowed_methods='GET, POST, PUT, DELETE') images_resource = images.create_resource(custom_image_properties) mapper.connect('/images', controller=images_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/images', controller=images_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/images', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/images/{image_id}', controller=images_resource, action='update', conditions={'method': ['PATCH']}) mapper.connect('/images/{image_id}', controller=images_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/tasks', controller=images_resource, action='get_task_info', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}', controller=images_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/images/{image_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, PATCH, DELETE') mapper.connect('/images/{image_id}/import', controller=images_resource, action='import_image', conditions={'method': ['POST']}) mapper.connect('/images/{image_id}/import', controller=reject_method_resource, action='reject', allowed_methods='POST') mapper.connect('/stores/{store_id}/{image_id}', controller=images_resource, action='delete_from_store', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/stores/{store_id}/{image_id}', controller=reject_method_resource, action='reject', allowed_methods='DELETE') image_actions_resource = image_actions.create_resource() mapper.connect('/images/{image_id}/actions/deactivate', controller=image_actions_resource, action='deactivate', conditions={'method': ['POST']}, body_reject=True) mapper.connect('/images/{image_id}/actions/reactivate', controller=image_actions_resource, action='reactivate', conditions={'method': ['POST']}, body_reject=True) mapper.connect('/images/{image_id}/actions/deactivate', controller=reject_method_resource, action='reject', allowed_methods='POST') mapper.connect('/images/{image_id}/actions/reactivate', controller=reject_method_resource, action='reject', allowed_methods='POST') image_data_resource = image_data.create_resource() mapper.connect('/images/{image_id}/file', controller=image_data_resource, action='download', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/file', controller=image_data_resource, action='upload', conditions={'method': ['PUT']}) mapper.connect('/images/{image_id}/file', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT') 
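        # NOTE(editor): Hedged illustration of the routing convention used
        # throughout this method (commented out; not part of the router).
        # Every path registers one route per supported verb plus a trailing
        # 'reject' route whose 'allowed_methods' value is what a client is
        # told is permitted, and 'body_reject=True' additionally refuses
        # requests that carry an unexpected body. A hypothetical client
        # probe of the /file routes just above ('glance.example' and the
        # 'requests' usage are assumptions, not Glance code):
        #
        #     import requests
        #
        #     url = 'http://glance.example/v2/images/%s/file' % image_id
        #     resp = requests.delete(url)       # DELETE is not mapped above
        #     assert resp.status_code == 405    # handled by the reject route
        #     # the response presumably advertises the mapped verbs:
        #     assert resp.headers.get('Allow') == 'GET, PUT'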
mapper.connect('/images/{image_id}/stage', controller=image_data_resource, action='stage', conditions={'method': ['PUT']}) mapper.connect('/images/{image_id}/stage', controller=reject_method_resource, action='reject', allowed_methods='PUT') # Location APIs mapper.connect('/images/{image_id}/locations', controller=images_resource, action='add_location', conditions={'method': ['POST']}) mapper.connect('/images/{image_id}/locations', controller=images_resource, action='get_locations', conditions={'method': ['GET']}) mapper.connect('/images/{image_id}/locations', controller=reject_method_resource, action='reject', allowed_methods='POST, GET') image_tags_resource = image_tags.create_resource() mapper.connect('/images/{image_id}/tags/{tag_value}', controller=image_tags_resource, action='update', conditions={'method': ['PUT']}, body_reject=True) mapper.connect('/images/{image_id}/tags/{tag_value}', controller=image_tags_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/images/{image_id}/tags/{tag_value}', controller=reject_method_resource, action='reject', allowed_methods='PUT, DELETE') image_members_resource = image_members.create_resource() mapper.connect('/images/{image_id}/members', controller=image_members_resource, action='index', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/members', controller=image_members_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/images/{image_id}/members', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/images/{image_id}/members/{member_id}', controller=image_members_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/members/{member_id}', controller=image_members_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/images/{image_id}/members/{member_id}', controller=image_members_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/images/{image_id}/members/{member_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') tasks_resource = tasks.create_resource() mapper.connect('/tasks', controller=tasks_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/tasks', controller=tasks_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/tasks', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/tasks/{task_id}', controller=tasks_resource, action='get', conditions={'method': ['GET']}) mapper.connect('/tasks/{task_id}', controller=tasks_resource, action='delete', conditions={'method': ['DELETE']}) mapper.connect('/tasks/{task_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, DELETE') # Discovery API info_resource = discovery.create_resource() mapper.connect('/info/import', controller=info_resource, action='get_image_import', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/info/import', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/info/stores', controller=info_resource, action='get_stores', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/info/stores', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/info/usage', controller=info_resource,
action='get_usage', conditions={'method': ['GET']}) mapper.connect('/info/stores/detail', controller=info_resource, action='get_stores_detail', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/info/stores/detail', controller=reject_method_resource, action='reject', allowed_methods='GET') # Cache Management API cache_manage_resource = cached_images.create_resource() mapper.connect('/cache', controller=cache_manage_resource, action='get_cache_state', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/cache', controller=cache_manage_resource, action='clear_cache', conditions={'method': ['DELETE']}) mapper.connect('/cache', controller=reject_method_resource, action='reject', allowed_methods='GET, DELETE') mapper.connect('/cache/{image_id}', controller=cache_manage_resource, action='delete_cache_entry', conditions={'method': ['DELETE']}) mapper.connect('/cache/{image_id}', controller=cache_manage_resource, action='queue_image_from_api', conditions={'method': ['PUT']}) mapper.connect('/cache/{image_id}', controller=reject_method_resource, action='reject', allowed_methods='DELETE, PUT') super(API, self).__init__(mapper) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/schemas.py0000664000175000017500000000755300000000000017416 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
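# NOTE(editor): Hedged usage sketch (commented out, not executed): the
# read-only /v2/schemas/... routes in glance.api.v2.router map onto the
# Controller below, so any HTTP client can fetch a schema document. The
# 'glance.example' endpoint is a placeholder assumption:
#
#     import json
#     import urllib.request
#
#     url = 'http://glance.example/v2/schemas/image'
#     with urllib.request.urlopen(url) as resp:
#         schema = json.load(resp)
#     # the document is expected to carry the schema name, e.g. 'image'
#     print(schema.get('name'))
#
# As the methods below show, image and metadef schemas are served via
# raw(), while member and task schemas are served via minimal().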
from glance.api.v2 import image_members from glance.api.v2 import images from glance.api.v2 import metadef_namespaces from glance.api.v2 import metadef_objects from glance.api.v2 import metadef_properties from glance.api.v2 import metadef_resource_types from glance.api.v2 import metadef_tags from glance.api.v2 import tasks from glance.common import wsgi class Controller(object): def __init__(self, custom_image_properties=None): self.image_schema = images.get_schema(custom_image_properties) self.image_collection_schema = images.get_collection_schema( custom_image_properties) self.member_schema = image_members.get_schema() self.member_collection_schema = image_members.get_collection_schema() self.task_schema = tasks.get_task_schema() self.task_collection_schema = tasks.get_collection_schema() # Metadef schemas self.metadef_namespace_schema = metadef_namespaces.get_schema() self.metadef_namespace_collection_schema = ( metadef_namespaces.get_collection_schema()) self.metadef_resource_type_schema = metadef_resource_types.get_schema() self.metadef_resource_type_collection_schema = ( metadef_resource_types.get_collection_schema()) self.metadef_property_schema = metadef_properties.get_schema() self.metadef_property_collection_schema = ( metadef_properties.get_collection_schema()) self.metadef_object_schema = metadef_objects.get_schema() self.metadef_object_collection_schema = ( metadef_objects.get_collection_schema()) self.metadef_tag_schema = metadef_tags.get_schema() self.metadef_tag_collection_schema = ( metadef_tags.get_collection_schema()) def image(self, req): return self.image_schema.raw() def images(self, req): return self.image_collection_schema.raw() def member(self, req): return self.member_schema.minimal() def members(self, req): return self.member_collection_schema.minimal() def task(self, req): return self.task_schema.minimal() def tasks(self, req): return self.task_collection_schema.minimal() def metadef_namespace(self, req): return self.metadef_namespace_schema.raw() def metadef_namespaces(self, req): return self.metadef_namespace_collection_schema.raw() def metadef_resource_type(self, req): return self.metadef_resource_type_schema.raw() def metadef_resource_types(self, req): return self.metadef_resource_type_collection_schema.raw() def metadef_property(self, req): return self.metadef_property_schema.raw() def metadef_properties(self, req): return self.metadef_property_collection_schema.raw() def metadef_object(self, req): return self.metadef_object_schema.raw() def metadef_objects(self, req): return self.metadef_object_collection_schema.raw() def metadef_tag(self, req): return self.metadef_tag_schema.raw() def metadef_tags(self, req): return self.metadef_tag_collection_schema.raw() def create_resource(custom_image_properties=None): controller = Controller(custom_image_properties) return wsgi.Resource(controller) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/v2/tasks.py0000664000175000017500000004032500000000000017112 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import http.client as http import urllib.parse as urlparse import debtcollector import glance_store from oslo_config import cfg from oslo_log import log as logging import oslo_serialization.jsonutils as json from oslo_utils import encodeutils from oslo_utils import uuidutils import webob.exc from glance.api import common from glance.api import policy from glance.api.v2 import policy as api_policy from glance.common import exception from glance.common import timeutils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _, _LW import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('task_time_to_live', 'glance.common.config', group='task') _DEPRECATION_MESSAGE = ("The task API is being deprecated and " "it will be superseded by the new image import " "API. Please refer to this link for more " "information about the aforementioned process: " "https://specs.openstack.org/openstack/glance-specs/" "specs/mitaka/approved/image-import/" "image-import-refactor.html") class TasksController(object): """Manages operations on tasks.""" def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def create(self, req, task): # NOTE(rosmaita): access to this call is enforced in the deserializer ctxt = req.context task_factory = self.gateway.get_task_factory(ctxt) executor_factory = self.gateway.get_task_executor_factory(ctxt) task_repo = self.gateway.get_task_repo(ctxt) try: new_task = task_factory.new_task( task_type=task['type'], owner=ctxt.owner, task_input=task['input'], image_id=task['input'].get('image_id'), user_id=ctxt.user_id, request_id=ctxt.request_id) task_repo.add(new_task) task_executor = executor_factory.new_task_executor(ctxt) pool = common.get_thread_pool("tasks_pool") pool.spawn(new_task.run, task_executor) except exception.Forbidden as e: msg = (_LW("Forbidden to create task. 
Reason: %(reason)s") % {'reason': encodeutils.exception_to_unicode(e)}) LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=e.msg) return new_task @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): # NOTE(rosmaita): access to this call is enforced in the deserializer result = {} if filters is None: filters = {} filters['deleted'] = False if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) task_repo = self.gateway.get_task_stub_repo(req.context) try: tasks = task_repo.list(marker, limit, sort_key, sort_dir, filters) if len(tasks) != 0 and len(tasks) == limit: result['next_marker'] = tasks[-1].task_id except (exception.NotFound, exception.InvalidSortKey, exception.InvalidFilterRangeValue) as e: LOG.warning(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.warning(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPForbidden(explanation=e.msg) result['tasks'] = tasks return result @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def get(self, req, task_id): _enforce_access_policy(self.policy, req) try: task_repo = self.gateway.get_task_repo(req.context) task = task_repo.get(task_id) except exception.NotFound as e: msg = (_LW("Failed to find task %(task_id)s. Reason: %(reason)s") % {'task_id': task_id, 'reason': encodeutils.exception_to_unicode(e)}) LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: msg = (_LW("Forbidden to get task %(task_id)s. Reason:" " %(reason)s") % {'task_id': task_id, 'reason': encodeutils.exception_to_unicode(e)}) LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=e.msg) return task @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def delete(self, req, task_id): _enforce_access_policy(self.policy, req) msg = (_("This operation is currently not permitted on Glance Tasks. 
" "They are auto deleted after reaching the time based on " "their expires_at property.")) raise webob.exc.HTTPMethodNotAllowed(explanation=msg, headers={'Allow': 'GET'}, body_template='${explanation}') class RequestDeserializer(wsgi.JSONRequestDeserializer): _required_properties = ['type', 'input'] def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): status = filters.get('status') if status: if status not in ['pending', 'processing', 'success', 'failure']: msg = _('Invalid status value: %s') % status raise webob.exc.HTTPBadRequest(explanation=msg) type = filters.get('type') if type: if type not in ['import']: msg = _('Invalid type value: %s') % type raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_marker(self, marker): if marker and not uuidutils.is_uuid_like(marker): msg = _('Invalid marker format') raise webob.exc.HTTPBadRequest(explanation=msg) return marker def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_create_body(self, body): """Validate the body of task creating request""" for param in self._required_properties: if param not in body: msg = _("Task '%s' is required") % param raise webob.exc.HTTPBadRequest(explanation=msg) def __init__(self, schema=None, policy_engine=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_task_schema() # want to enforce the access policy as early as possible self.policy_engine = policy_engine or policy.Enforcer() def create(self, request): _enforce_access_policy(self.policy_engine, request) body = self._get_request_body(request) self._validate_create_body(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) task = {} properties = body for key in self._required_properties: try: task[key] = properties.pop(key) except KeyError: pass return dict(task=task) def index(self, request): _enforce_access_policy(self.policy_engine, request) params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = self._validate_marker(marker) if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, task_schema=None, partial_task_schema=None): super(ResponseSerializer, self).__init__() self.task_schema = task_schema or get_task_schema() self.partial_task_schema = (partial_task_schema or _get_partial_task_schema()) def _inject_location_header(self, response, task): location = self._get_task_location(task) response.headers['Location'] = location def _get_task_location(self, task): return '/v2/tasks/%s' % task.task_id def 
_format_task(self, schema, task): task_view = { 'id': task.task_id, 'input': task.task_input, 'type': task.type, 'status': task.status, 'owner': task.owner, 'message': task.message, 'result': task.result, 'created_at': timeutils.isotime(task.created_at), 'updated_at': timeutils.isotime(task.updated_at), 'self': self._get_task_location(task), 'schema': '/v2/schemas/task' } if task.image_id: task_view['image_id'] = task.image_id if task.request_id: task_view['request_id'] = task.request_id if task.user_id: task_view['user_id'] = task.user_id if task.expires_at: task_view['expires_at'] = timeutils.isotime(task.expires_at) task_view = schema.filter(task_view) # domain return task_view def _format_task_stub(self, schema, task): task_view = { 'id': task.task_id, 'type': task.type, 'status': task.status, 'owner': task.owner, 'created_at': timeutils.isotime(task.created_at), 'updated_at': timeutils.isotime(task.updated_at), 'self': self._get_task_location(task), 'schema': '/v2/schemas/task' } if task.expires_at: task_view['expires_at'] = timeutils.isotime(task.expires_at) task_view = schema.filter(task_view) # domain return task_view def create(self, response, task): response.status_int = http.CREATED self._inject_location_header(response, task) self.get(response, task) def get(self, response, task): task_view = self._format_task(self.task_schema, task) response.unicode_body = json.dumps(task_view, ensure_ascii=False) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) body = { 'tasks': [self._format_task_stub(self.partial_task_schema, task) for task in result['tasks']], 'first': '/v2/tasks', 'schema': '/v2/schemas/tasks', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urlparse.urlencode(params) body['next'] = '/v2/tasks?%s' % next_query response.unicode_body = json.dumps(body, ensure_ascii=False) response.content_type = 'application/json' _TASK_SCHEMA = { "id": { "description": _("An identifier for the task"), "pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), "type": "string" }, "type": { "description": _("The type of task represented by this content"), "enum": [ "import", "api_image_import", "location_import", ], "type": "string" }, "status": { "description": _("The current status of this task"), "enum": [ "pending", "processing", "success", "failure" ], "type": "string" }, "input": { "description": _("The parameters required by task, JSON blob"), "type": ["null", "object"], }, "result": { "description": _("The result of current task, JSON blob"), "type": ["null", "object"], }, "owner": { "description": _("An identifier for the owner of this task"), "type": "string" }, "message": { "description": _("Human-readable informative message only included" " when appropriate (usually on failure)"), "type": "string", }, "image_id": { "description": _("Image associated with the task"), "type": "string", }, "request_id": { "description": _("Human-readable informative request-id"), "type": "string", }, "user_id": { "description": _("User associated with the task"), "type": "string", }, "expires_at": { "description": _("Datetime when this resource would be" " subject to removal"), "type": ["null", "string"] }, "created_at": { "description": _("Datetime when this resource was created"), "type": "string" }, "updated_at": { 
"description": _("Datetime when this resource was updated"), "type": "string" }, 'self': { 'readOnly': True, 'type': 'string' }, 'schema': { 'readOnly': True, 'type': 'string' } } def _enforce_access_policy(policy_engine, request): api_policy.TasksAPIPolicy( request.context, enforcer=policy_engine).tasks_api_access() def get_task_schema(): properties = copy.deepcopy(_TASK_SCHEMA) schema = glance.schema.Schema('task', properties) return schema def _get_partial_task_schema(): properties = copy.deepcopy(_TASK_SCHEMA) hide_properties = ['input', 'result', 'message'] for key in hide_properties: del properties[key] schema = glance.schema.Schema('task', properties) return schema def get_collection_schema(): task_schema = _get_partial_task_schema() return glance.schema.CollectionSchema('tasks', task_schema) def create_resource(): """Task resource factory method""" task_schema = get_task_schema() partial_task_schema = _get_partial_task_schema() deserializer = RequestDeserializer(task_schema) serializer = ResponseSerializer(task_schema, partial_task_schema) controller = TasksController() return wsgi.Resource(controller, deserializer, serializer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/api/versions.py0000664000175000017500000001062200000000000017303 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import urllib from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import webob.dec from glance.common import wsgi from glance.i18n import _ versions_opts = [ cfg.StrOpt('public_endpoint', help=_(""" Public url endpoint to use for Glance versions response. This is the public url endpoint that will appear in the Glance "versions" response. If no value is specified, the endpoint that is displayed in the version's response is that of the host running the API service. Change the endpoint to represent the proxy URL if the API service is running behind a proxy. If the service is running behind a load balancer, add the load balancer's URL for this value. 
Possible values: * None * Proxy URL * Load balancer URL Related options: * None """)), ] CONF = cfg.CONF CONF.register_opts(versions_opts) LOG = logging.getLogger(__name__) class Controller(object): """A wsgi controller that reports which API versions are supported.""" def index(self, req, explicit=False): """Respond to a request for all OpenStack API versions.""" def build_version_object(version, path, status): url = CONF.public_endpoint or req.application_url # Always add '/' to url end for urljoin href url url = url.rstrip('/') + '/' href = urllib.parse.urljoin(url, path).rstrip('/') + '/' return { 'id': 'v%s' % version, 'status': status, 'links': [ { 'rel': 'self', 'href': '%s' % href, }, ], } version_objs = [] if CONF.image_cache_dir: version_objs.extend([ build_version_object('2.16', 'v2', 'SUPPORTED'), build_version_object('2.15', 'v2', 'SUPPORTED'), build_version_object('2.14', 'v2', 'SUPPORTED'), ]) else: version_objs.extend([ build_version_object('2.15', 'v2', 'SUPPORTED'), ]) if CONF.enabled_backends: version_objs.extend([ build_version_object('2.13', 'v2', 'SUPPORTED'), build_version_object('2.12', 'v2', 'SUPPORTED'), build_version_object('2.11', 'v2', 'SUPPORTED'), build_version_object('2.10', 'v2', 'SUPPORTED'), build_version_object('2.9', 'v2', 'SUPPORTED'), build_version_object('2.8', 'v2', 'SUPPORTED'), ]) else: version_objs.extend([ build_version_object('2.9', 'v2', 'SUPPORTED'), ]) version_objs.extend([ build_version_object('2.17', 'v2', 'CURRENT'), build_version_object('2.7', 'v2', 'SUPPORTED'), build_version_object('2.6', 'v2', 'SUPPORTED'), build_version_object('2.5', 'v2', 'SUPPORTED'), build_version_object('2.4', 'v2', 'SUPPORTED'), build_version_object('2.3', 'v2', 'SUPPORTED'), build_version_object('2.2', 'v2', 'SUPPORTED'), build_version_object('2.1', 'v2', 'SUPPORTED'), build_version_object('2.0', 'v2', 'SUPPORTED'), ]) status = explicit and http.client.OK or http.client.MULTIPLE_CHOICES response = webob.Response(request=req, status=status, content_type='application/json') response.body = jsonutils.dump_as_bytes(dict(versions=version_objs)) return response @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): return self.index(req) def create_resource(conf): return wsgi.Resource(Controller()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8503022 glance-29.0.0/glance/async_/0000775000175000017500000000000000000000000015563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/__init__.py0000664000175000017500000001564500000000000017707 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import futurist from oslo_log import log as logging from glance.i18n import _LE LOG = logging.getLogger(__name__) class TaskExecutor(object): """Base class for Asynchronous task executors. 
It does not itself implement an execution mechanism; it provisions subclasses with the variables they need to work with core Glance modules such as context, task_repo, image_repo and image_factory. Note: It also abstracts the standard pre-processing and post-processing operations a task executes, such as validation checks, security checks, introspection and error handling, giving developers an overall sense of the execution pipeline logic. Args: context: glance.context.RequestContext object for AuthZ and AuthN checks task_repo: glance.db.TaskRepo object which acts as a translator for glance.domain.Task and glance.domain.TaskStub objects into ORM semantics image_repo: glance.db.ImageRepo object which acts as a translator for glance.domain.Image object into ORM semantics image_factory: glance.domain.ImageFactory object to be used for creating new images for certain types of tasks viz. import, cloning admin_repo: glance.db.ImageRepo object which acts as a translator for glance.domain.Image object into ORM semantics, but with an admin context (optional) """ def __init__(self, context, task_repo, image_repo, image_factory, admin_repo=None): self.context = context self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory self.admin_repo = admin_repo def begin_processing(self, task_id): task = self.task_repo.get(task_id) task.begin_processing() self.task_repo.save(task) # start running self._run(task_id, task.type) def _run(self, task_id, task_type): task = self.task_repo.get(task_id) msg = _LE("This execution of Tasks is not set up. Please consult the " "project documentation for more information on the " "executors available.") LOG.error(msg) task.fail(_LE("Internal error occurred while trying to process task.")) self.task_repo.save(task) class ThreadPoolModel(object): """Base class for an abstract ThreadPool. Do not instantiate this directly, use one of the concrete implementations. """ DEFAULTSIZE = 1 @staticmethod def get_threadpool_executor_class(): """Returns a futurist.ThreadPoolExecutor class.""" pass def __init__(self, size=None): if size is None: size = self.DEFAULTSIZE threadpool_cls = self.get_threadpool_executor_class() LOG.debug('Creating threadpool model %r with size %i', threadpool_cls.__name__, size) self.pool = threadpool_cls(size) def spawn(self, fn, *args, **kwargs): """Spawn a function with args using the thread pool.""" LOG.debug('Spawning with %s: %s(%s, %s)', self.get_threadpool_executor_class().__name__, fn, args, kwargs) return self.pool.submit(fn, *args, **kwargs) def map(self, fn, iterable): """Map a function to each value in an iterable. This spawns a thread for each item in the provided iterable, yielding the results in the same order. Each thread may run in parallel, up to the limit of the pool.
:param fn: A function to work on each item :param iterable: A sequence of items to process :returns: A generator of results in the same order """ threads = [] for i in iterable: threads.append(self.spawn(fn, i)) for future in threads: yield future.result() class EventletThreadPoolModel(ThreadPoolModel): """A ThreadPoolModel suitable for use with eventlet/greenthreads.""" DEFAULTSIZE = 1024 @staticmethod def get_threadpool_executor_class(): return futurist.GreenThreadPoolExecutor class NativeThreadPoolModel(ThreadPoolModel): """A ThreadPoolModel suitable for use with native threads.""" DEFAULTSIZE = 16 @staticmethod def get_threadpool_executor_class(): return futurist.ThreadPoolExecutor _THREADPOOL_MODEL = None def set_threadpool_model(thread_type): """Set the system-wide threadpool model. This sets the type of ThreadPoolModel to use globally in the process. It should be called very early in init, and only once. :param thread_type: A string indicating the threading type in use, either "eventlet" or "native" :raises: RuntimeError if the model is already set or some thread_type other than one of the supported ones is provided. """ global _THREADPOOL_MODEL if thread_type == 'native': model = NativeThreadPoolModel elif thread_type == 'eventlet': model = EventletThreadPoolModel else: raise RuntimeError( ('Invalid thread type %r ' '(must be "native" or "eventlet")') % (thread_type)) if _THREADPOOL_MODEL is model: # Re-setting the same model is fine... return if _THREADPOOL_MODEL is not None: # ...changing it is not. raise RuntimeError('Thread model is already set') LOG.info('Threadpool model set to %r', model.__name__) _THREADPOOL_MODEL = model def get_threadpool_model(): """Returns the system-wide threadpool model class. This must be called after set_threadpool_model() whenever some code needs to know what the threadpool implementation is. This may only be called after set_threadpool_model() has been called to set the desired threading mode. If it is called before the model is set, it will raise AssertionError. This would likely be the case if this got run in a test before the model was initialized, or if glance modules that use threading were imported and run from some other code without setting the model first. :raises: AssertionError if the model has not yet been set. """ global _THREADPOOL_MODEL assert _THREADPOOL_MODEL return _THREADPOOL_MODEL ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8503022 glance-29.0.0/glance/async_/flows/0000775000175000017500000000000000000000000016715 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/__init__.py0000664000175000017500000000000000000000000021014 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8503022 glance-29.0.0/glance/async_/flows/_internal_plugins/0000775000175000017500000000000000000000000022431 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/_internal_plugins/__init__.py0000664000175000017500000001663400000000000024544 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from stevedore import named from glance.i18n import _ CONF = cfg.CONF import_filtering_opts = [ cfg.ListOpt('allowed_schemes', item_type=cfg.types.String(quotes=True), bounds=True, default=['http', 'https'], help=_(""" Specify the "whitelist" of allowed url schemes for web-download. This option provides whitelisting of uri schemes that will be allowed when an end user imports an image using the web-download import method. The whitelist has priority such that if there is also a blacklist defined for schemes, the blacklist will be ignored. Host and port filtering, however, will be applied. See the Glance Administration Guide for more information. Possible values: * List containing normalized url schemes as they are returned from urllib.parse. For example ['ftp','https'] * Hint: leave the whitelist empty if you want the disallowed_schemes blacklist to be processed Related options: * disallowed_schemes * allowed_hosts * disallowed_hosts * allowed_ports * disallowed_ports """)), cfg.ListOpt('disallowed_schemes', item_type=cfg.types.String(quotes=True), bounds=True, default=[], help=_(""" Specify the "blacklist" of uri schemes disallowed for web-download. This option provides blacklisting of uri schemes that will be rejected when an end user imports an image using the web-download import method. Note that if a scheme whitelist is defined using the 'allowed_schemes' option, *this option will be ignored*. Host and port filtering, however, will be applied. See the Glance Administration Guide for more information. Possible values: * List containing normalized url schemes as they are returned from urllib.parse. For example ['ftp','https'] * By default the list is empty Related options: * allowed_schemes * allowed_hosts * disallowed_hosts * allowed_ports * disallowed_ports """)), cfg.ListOpt('allowed_hosts', item_type=cfg.types.HostAddress(), bounds=True, default=[], help=_(""" Specify the "whitelist" of allowed target hosts for web-download. This option provides whitelisting of hosts that will be allowed when an end user imports an image using the web-download import method. The whitelist has priority such that if there is also a blacklist defined for hosts, the blacklist will be ignored. The uri must have already passed scheme filtering before this host filter will be applied. If the uri passes, port filtering will then be applied. See the Glance Administration Guide for more information. Possible values: * List containing normalized hostname or ip like it would be returned in the urllib.parse netloc without the port * By default the list is empty * Hint: leave the whitelist empty if you want the disallowed_hosts blacklist to be processed Related options: * allowed_schemes * disallowed_schemes * disallowed_hosts * allowed_ports * disallowed_ports """)), cfg.ListOpt('disallowed_hosts', item_type=cfg.types.HostAddress(), bounds=True, default=[], help=_(""" Specify the "blacklist" of hosts disallowed for web-download. This option provides blacklisting of hosts that will be rejected when an end user imports an image using the web-download import method. 
Note that if a host whitelist is defined using the 'allowed_hosts' option, *this option will be ignored*. The uri must have already passed scheme filtering before this host filter will be applied. If the uri passes, port filtering will then be applied. See the Glance Administration Guide for more information. Possible values: * List containing normalized hostname or ip like it would be returned in the urllib.parse netloc without the port * By default the list is empty Related options: * allowed_schemes * disallowed_schemes * allowed_hosts * allowed_ports * disallowed_ports """)), cfg.ListOpt('allowed_ports', item_type=cfg.types.Integer(min=1, max=65535), bounds=True, default=[80, 443], help=_(""" Specify the "whitelist" of allowed ports for web-download. This option provides whitelisting of ports that will be allowed when an end user imports an image using the web-download import method. The whitelist has priority such that if there is also a blacklist defined for ports, the blacklist will be ignored. Note that scheme and host filtering have already been applied by the time a uri hits the port filter. See the Glance Administration Guide for more information. Possible values: * List containing ports as they are returned from urllib.parse netloc field. Thus the value is a list of integer values, for example [80, 443] * Hint: leave the whitelist empty if you want the disallowed_ports blacklist to be processed Related options: * allowed_schemes * disallowed_schemes * allowed_hosts * disallowed_hosts * disallowed_ports """)), cfg.ListOpt('disallowed_ports', item_type=cfg.types.Integer(min=1, max=65535), bounds=True, default=[], help=_(""" Specify the "blacklist" of disallowed ports for web-download. This option provides blacklisting of target ports that will be rejected when an end user imports an image using the web-download import method. Note that if a port whitelist is defined using the 'allowed_ports' option, *this option will be ignored*. Note that scheme and host filtering have already been applied by the time a uri hits the port filter. See the Glance Administration Guide for more information. Possible values: * List containing ports as they are returned from urllib.parse netloc field. Thus the value is a list of integer values, for example [22, 88] * By default this list is empty Related options: * allowed_schemes * disallowed_schemes * allowed_hosts * disallowed_hosts * allowed_ports """)), ] CONF.register_opts(import_filtering_opts, group='import_filtering_opts') def get_import_plugin(**kwargs): method_list = CONF.enabled_import_methods import_method = kwargs.get('import_req')['method']['name'] if import_method in method_list: import_method = import_method.replace("-", "_") task_list = [import_method] # TODO(jokke): Implement error handling of non-listed methods. extensions = named.NamedExtensionManager( 'glance.image_import.internal_plugins', names=task_list, name_order=True, invoke_on_load=True, invoke_kwds=kwargs) for extension in extensions.extensions: return extension.obj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/_internal_plugins/base_download.py0000664000175000017500000001246700000000000025616 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # Copyright 2022 OVHCloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import glance_store as store_api from glance_store import backend from oslo_config import cfg from oslo_log import log as logging from taskflow import task from glance.common import exception from glance.i18n import _, _LE LOG = logging.getLogger(__name__) CONF = cfg.CONF class BaseDownload(task.Task, metaclass=abc.ABCMeta): default_provides = 'file_uri' def __init__(self, task_id, task_type, action_wrapper, stores, plugin_name): self.task_id = task_id self.task_type = task_type self.image_id = action_wrapper.image_id self.action_wrapper = action_wrapper self.stores = stores self._path = None self.plugin_name = plugin_name or 'Download' super(BaseDownload, self).__init__( name='%s-%s-%s' % (task_type, self.plugin_name, task_id)) # NOTE(abhishekk): Use reserved 'os_glance_staging_store' for # staging the data, the else part will be removed once old way # of configuring store is deprecated. if CONF.enabled_backends: self.store = store_api.get_store_from_store_identifier( 'os_glance_staging_store') else: if CONF.node_staging_uri is None: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Missing node_staging_uri: %(work_dir)s") % {'task_id': self.task_id, 'task_type': self.task_type, 'work_dir': CONF.node_staging_uri}) raise exception.BadTaskConfiguration(msg) self.store = self._build_store() def _build_store(self): # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're # forced to build our own config object, register the required options # (and by required I mean *ALL* of them, even the ones we don't want), # and create our own store instance by calling a private function. # This is certainly unfortunate but it's the best we can do until the # glance_store refactor is done. A good thing is that glance_store is # under our team's management and it gates on Glance so changes to # this API will (should?) break task's tests. # TODO(abhishekk): After removal of backend module from glance_store # need to change this to use multi_backend module. conf = cfg.ConfigOpts() try: backend.register_opts(conf) except cfg.DuplicateOptError: pass conf.set_override('filesystem_store_datadir', CONF.node_staging_uri[7:], group='glance_store') # NOTE(flaper87): Do not even try to judge me for this... :( # With the glance_store refactor, this code will change, until # that happens, we don't have a better option and this is the # least worst one, IMHO. store = store_api.backend._load_store(conf, 'file') if store is None: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Could not load the filesystem store") % {'task_id': self.task_id, 'task_type': self.task_type}) raise exception.BadTaskConfiguration(msg) store.configure() return store def revert(self, result, **kwargs): LOG.error(_LE('Task: %(task_id)s failed to import image ' '%(image_id)s to the filesystem.'), {'task_id': self.task_id, 'image_id': self.image_id}) # NOTE(abhishekk): Revert image state back to 'queued' as # something went wrong. 
# NOTE(danms): If we failed to stage the image, then none # of the _ImportToStore() tasks could have run, so we need # to move all stores out of "importing" and into "failed". with self.action_wrapper as action: action.set_image_attribute(status='queued') action.remove_importing_stores(self.stores) action.add_failed_stores(self.stores) # NOTE(abhishekk): Deleting partial image data from staging area if self._path is not None: LOG.debug(('Deleting image %(image_id)s from staging ' 'area.'), {'image_id': self.image_id}) try: if CONF.enabled_backends: store_api.delete(self._path, None) else: store_api.delete_from_backend(self._path) except Exception: LOG.exception(_LE("Error reverting web/glance download " "task: %(task_id)s"), { 'task_id': self.task_id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/_internal_plugins/copy_image.py0000664000175000017500000001337600000000000025131 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import glance_store as store_api from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import linear_flow as lf from taskflow import task from taskflow.types import failure from glance.common import exception from glance.i18n import _, _LE LOG = logging.getLogger(__name__) CONF = cfg.CONF class _CopyImage(task.Task): default_provides = 'file_uri' def __init__(self, task_id, task_type, image_repo, action_wrapper): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = action_wrapper.image_id self.action_wrapper = action_wrapper super(_CopyImage, self).__init__( name='%s-CopyImage-%s' % (task_type, task_id)) self.staging_store = store_api.get_store_from_store_identifier( 'os_glance_staging_store') def execute(self): with self.action_wrapper as action: return self._execute(action) def _execute(self, action): """Create temp file into store and return path to it :param action: Action wrapper """ # NOTE (abhishekk): If ``all_stores_must_succeed`` is set to True # and copying task fails then we keep data in staging area as it # is so that if second call is made to copy the same image then # no need to copy the data in staging area again. file_path = "%s/%s" % (getattr( CONF, 'os_glance_staging_store').filesystem_store_datadir, self.image_id) if os.path.exists(file_path): # NOTE (abhishekk): If previous copy-image operation is failed # due to power failure, network failure or any other reason and # the image data here is partial then clear the staging area and # re-stage the fresh image data. 
# Ref: https://bugs.launchpad.net/glance/+bug/1885003 size_in_staging = os.path.getsize(file_path) if action.image_size == size_in_staging: return file_path, 0 else: LOG.debug(("Found partial image data in staging " "%(fn)s, deleting it to re-stage " "again"), {'fn': file_path}) try: os.unlink(file_path) except OSError as e: LOG.error(_LE("Deletion of staged " "image data from %(fn)s has failed because " "[Errno %(en)d]"), {'fn': file_path, 'en': e.errno}) raise # At first search image in default_backend default_store = CONF.glance_store.default_backend for loc in action.image_locations: if loc['metadata'].get('store') == default_store: try: return self._copy_to_staging_store(loc) except store_api.exceptions.NotFound: msg = (_LE("Image not present in default store, searching " "in all glance-api specific available " "stores")) LOG.error(msg) break available_backends = CONF.enabled_backends for loc in action.image_locations: image_backend = loc['metadata'].get('store') if (image_backend in available_backends.keys() and image_backend != default_store): try: return self._copy_to_staging_store(loc) except store_api.exceptions.NotFound: LOG.error(_LE('Image: %(img_id)s is not present in store ' '%(store)s.'), {'img_id': self.image_id, 'store': image_backend}) continue raise exception.NotFound(_("Image not found in any configured " "store")) def _copy_to_staging_store(self, loc): store_backend = loc['metadata'].get('store') image_data, size = store_api.get(loc['url'], store_backend) msg = ("Found image, copying it in staging area") LOG.debug(msg) return self.staging_store.add(self.image_id, image_data, size)[0] def revert(self, result, **kwargs): if isinstance(result, failure.Failure): LOG.error(_LE('Task: %(task_id)s failed to copy image ' '%(image_id)s.'), {'task_id': self.task_id, 'image_id': self.image_id}) def get_flow(**kwargs): """Return task flow for web-download. :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. :param image_id: Image ID. :param action_wrapper: An api_image_import.ActionWrapper. """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') action_wrapper = kwargs.get('action_wrapper') return lf.Flow(task_type).add( _CopyImage(task_id, task_type, image_repo, action_wrapper), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/_internal_plugins/glance_download.py0000664000175000017500000001114700000000000026127 0ustar00zuulzuul00000000000000# Copyright 2022 OVHCloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import urllib.request from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from taskflow.patterns import linear_flow as lf from glance.async_.flows._internal_plugins import base_download from glance.async_ import utils from glance.common import exception from glance.common import utils as common_utils from glance.i18n import _, _LI, _LE LOG = logging.getLogger(__name__) CONF = cfg.CONF class _DownloadGlanceImage(base_download.BaseDownload): def __init__(self, context, task_id, task_type, action_wrapper, stores, glance_region, glance_image_id, glance_service_interface): self.context = context self.glance_region = glance_region self.glance_image_id = glance_image_id self.glance_service_interface = glance_service_interface super(_DownloadGlanceImage, self).__init__(task_id, task_type, action_wrapper, stores, 'GlanceDownload') def execute(self, image_size): """Create temp file into store and return path to it :param image_size: Glance Image Size retrieved from ImportMetadata task """ try: glance_endpoint = utils.get_glance_endpoint( self.context, self.glance_region, self.glance_service_interface) image_download_url = '%s/v2/images/%s/file' % ( glance_endpoint, self.glance_image_id) if not common_utils.validate_import_uri(image_download_url): LOG.debug("Processed URI for glance-download does not pass " "filtering: %s", image_download_url) msg = (_("Processed URI for glance-download does not pass " "filtering: %s") % image_download_url) raise exception.ImportTaskError(msg) LOG.info(_LI("Downloading glance image %s"), image_download_url) token = self.context.auth_token request = urllib.request.Request(image_download_url, headers={'X-Auth-Token': token}) data = urllib.request.urlopen(request) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error( _LE("Task %(task_id)s failed with exception %(error)s"), { "error": encodeutils.exception_to_unicode(e), "task_id": self.task_id }) self._path, bytes_written = self.store.add(self.image_id, data, 0)[0:2] if bytes_written != image_size: msg = (_("Task %(task_id)s failed because downloaded data " "size %(data_size)i is different from expected %(" "expected)i") % {"task_id": self.task_id, "data_size": bytes_written, "expected": image_size}) raise exception.ImportTaskError(msg) return self._path def get_flow(**kwargs): """Return task flow for glance-download. :param context: request context :param task_id: Task ID. :param task_type: Type of the task. :param action_wrapper: An api_image_import.ActionWrapper. :param backend: List of stores to import to. :param import_req: The import request, whose method carries the glance_region, glance_image_id and glance_service_interface parameters. """ context = kwargs.get('context') task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') action_wrapper = kwargs.get('action_wrapper') stores = kwargs.get('backend', [None]) # glance-download parameters import_req = kwargs.get('import_req') method = import_req.get('method') glance_region = method.get('glance_region') glance_image_id = method.get('glance_image_id') glance_service_interface = method.get('glance_service_interface') return lf.Flow(task_type).add( _DownloadGlanceImage(context, task_id, task_type, action_wrapper, stores, glance_region, glance_image_id, glance_service_interface), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/_internal_plugins/web_download.py0000664000175000017500000000674700000000000025461 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc.
# Copyright 2022 OVHCloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from taskflow.patterns import linear_flow as lf from glance.async_.flows._internal_plugins import base_download from glance.common import exception from glance.common.scripts import utils as script_utils from glance.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class _WebDownload(base_download.BaseDownload): def __init__(self, task_id, task_type, uri, action_wrapper, stores): self.uri = uri super(_WebDownload, self).__init__(task_id, task_type, action_wrapper, stores, 'WebDownload') def execute(self): """Create temp file into store and return path to it """ # NOTE(jokke): We've decided to use staging area for this task as # a way to expect users to configure a local store for pre-import # works on the image to happen. # # While using any path should be "technically" fine, it's not what # we recommend as the best solution. For more details on this, please # refer to the comment in the `_ImportToStore.execute` method. try: data = script_utils.get_image_data_iter(self.uri) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Task %(task_id)s failed with exception %(error)s", {"error": encodeutils.exception_to_unicode(e), "task_id": self.task_id}) self._path, bytes_written = self.store.add(self.image_id, data, 0)[0:2] try: content_length = int(data.headers['content-length']) if bytes_written != content_length: msg = (_("Task %(task_id)s failed because downloaded data " "size %(data_size)i is different from expected %(" "expected)i") % {"task_id": self.task_id, "data_size": bytes_written, "expected": content_length}) raise exception.ImportTaskError(msg) except (KeyError, ValueError): pass return self._path def get_flow(**kwargs): """Return task flow for web-download. :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. :param uri: URI the image data is downloaded from. :param action_wrapper: An api_image_import.ActionWrapper. """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') uri = kwargs.get('import_req')['method'].get('uri') action_wrapper = kwargs.get('action_wrapper') stores = kwargs.get('backend', [None]) return lf.Flow(task_type).add( _WebDownload(task_id, task_type, uri, action_wrapper, stores), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/api_image_import.py0000664000175000017500000012164200000000000022602 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import functools
import json
import os
import urllib.request

import glance_store as store_api
from glance_store import backend
from glance_store import exceptions as store_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import taskflow
from taskflow.patterns import linear_flow as lf
from taskflow import retry
from taskflow import task

from glance.api import common as api_common
import glance.async_.flows._internal_plugins as internal_plugins
import glance.async_.flows.plugins as import_plugins
from glance.async_ import utils
from glance.common import exception
from glance.common.scripts.image_import import main as image_import
from glance.common.scripts import utils as script_utils
from glance.common import store_utils
from glance.i18n import _, _LE, _LI
from glance.quota import keystone as ks_quota

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

api_import_opts = [
    cfg.ListOpt('image_import_plugins',
                item_type=cfg.types.String(quotes=True),
                bounds=True,
                sample_default='[no_op]',
                default=[],
                help=_("""
Image import plugins to be enabled for task processing.

Provide a list of strings referring to the task objects that should be
included in the Image Import flow. The task objects need to be defined in
'glance/async_/flows/plugins/*' and may be implemented by the OpenStack
Glance project team, a deployer, or a third party.

By default no plugins are enabled; to take advantage of the plugin model,
the list of plugins must be set explicitly in the
glance-image-import.conf file.

The allowed value for this option is a comma-separated list of object
names between ``[`` and ``]``.

Possible values:
    * no_op (only logs a debug-level message noting that the plugin has
      been executed)
    * Any provided task object name to be included in the flow.

""")),
]

CONF.register_opts(api_import_opts, group='image_import_opts')

glance_download_opts = [
    cfg.ListOpt('extra_properties',
                item_type=cfg.types.String(quotes=True),
                bounds=True,
                default=[
                    'hw_', 'trait:', 'os_distro', 'os_secure_boot',
                    'os_type'],
                help=_("""
Specify the metadata property prefixes to be copied to the target image
when using glance-download.

All other properties coming from the source image won't be set on the
target image. If a specified property does not exist on the source image
it won't be set on the target image either. Note that you can't use the
os_glance prefix as it is reserved by glance, so the related properties
won't be set on the target image.

Possible values:
    * List containing extra_properties prefixes: ['os_', 'architecture']

""")),
]

CONF.register_opts(glance_download_opts, group='glance_download_properties')

# TODO(jokke): We should refactor the task implementations so that we do not
# need to duplicate what we have already for example in base_import.py.
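# A hedged configuration sketch: enabling the no_op plugin and narrowing
# the glance-download property prefixes in glance-image-import.conf
# (option and file names taken from the help texts above; values are
# illustrative only):
#
#     [image_import_opts]
#     image_import_plugins = [no_op]
#
#     [glance_download_properties]
#     extra_properties = ['os_', 'architecture']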
class _NoStoresSucceeded(exception.GlanceException): def __init__(self, message): super(_NoStoresSucceeded, self).__init__(message) class _InvalidGlanceDownloadImageStatus(exception.GlanceException): def __init__(self, message): super(_InvalidGlanceDownloadImageStatus, self).__init__(message) class ImportActionWrapper(object): """Wrapper for all the image metadata operations we do during an import. This is used to consolidate the changes we make to image metadata during an import operation, and can be used with an admin-capable repo to enable non-owner controlled modification of that data if desired. Use this as a context manager to make multiple changes followed by a save of the image in one operation. An _ImportActions object is yielded from the context manager, which defines the available operations. :param image_repo: The ImageRepo we should use to fetch/save the image :param image-id: The ID of the image we should be altering """ def __init__(self, image_repo, image_id, task_id): self._image_repo = image_repo self._image_id = image_id self._task_id = task_id def __enter__(self): self._image = self._image_repo.get(self._image_id) self._image_previous_status = self._image.status self._assert_task_lock(self._image) return _ImportActions(self._image) def __exit__(self, type, value, traceback): if type is not None: # NOTE(danms): Do not save the image if we raised in context return # NOTE(danms): If we were in the middle of a long-running # set_data() where someone else stole our lock, we may race # with them to update image locations and erase one that # someone else is working on. Checking the task lock here # again is not perfect exclusion, but in lieu of actual # thread-safe location updating, this at least reduces the # likelihood of that happening. self.assert_task_lock() if self._image_previous_status != self._image.status: LOG.debug('Image %(image_id)s status changing from ' '%(old_status)s to %(new_status)s', {'image_id': self._image_id, 'old_status': self._image_previous_status, 'new_status': self._image.status}) self._image_repo.save(self._image, self._image_previous_status) @property def image_id(self): return self._image_id def drop_lock_for_task(self): """Delete the import lock for our task. This is an atomic operation and thus does not require a context for the image save. Note that after calling this method, no further actions will be allowed on the image. :raises: NotFound if the image was not locked by the expected task. """ image = self._image_repo.get(self._image_id) self._image_repo.delete_property_atomic(image, 'os_glance_import_task', self._task_id) def _assert_task_lock(self, image): task_lock = image.extra_properties.get('os_glance_import_task') if task_lock != self._task_id: LOG.error('Image %(image)s import task %(task)s attempted to ' 'take action on image, but other task %(other)s holds ' 'the lock; Aborting.', {'image': self._image_id, 'task': self._task_id, 'other': task_lock}) raise exception.TaskAbortedError() def assert_task_lock(self): """Assert that we own the task lock on the image. :raises: TaskAbortedError if we do not """ image = self._image_repo.get(self._image_id) self._assert_task_lock(image) class _ImportActions(object): """Actions available for being performed on an image during import. This defines the available actions that can be performed on an image during import, which may be done with an image owned by another user. Do not instantiate this object directly, get it from ImportActionWrapper. 
""" IMPORTING_STORES_KEY = 'os_glance_importing_to_stores' IMPORT_FAILED_KEY = 'os_glance_failed_import' def __init__(self, image): self._image = image @property def image_id(self): return self._image.image_id @property def image_size(self): return self._image.size @property def image_locations(self): # Return a copy of this complex structure to make sure we do # not allow the plugin to mutate this underneath us for our # later save. If this needs to be a thing in the future, we # should have moderated access like all the other things here. return copy.deepcopy(self._image.locations) @property def image_disk_format(self): return self._image.disk_format @property def image_container_format(self): return self._image.container_format @property def image_extra_properties(self): return dict(self._image.extra_properties) @property def image_status(self): return self._image.status def merge_store_list(self, list_key, stores, subtract=False): stores = set([store for store in stores if store]) existing = set( self._image.extra_properties.get(list_key, '').split(',')) if subtract: if stores - existing: LOG.debug('Stores %(stores)s not in %(key)s for ' 'image %(image_id)s', {'stores': ','.join(sorted(stores - existing)), 'key': list_key, 'image_id': self.image_id}) merged_stores = existing - stores else: merged_stores = existing | stores stores_list = ','.join(sorted((store for store in merged_stores if store))) self._image.extra_properties[list_key] = stores_list LOG.debug('Image %(image_id)s %(key)s=%(stores)s', {'image_id': self.image_id, 'key': list_key, 'stores': stores_list}) def add_importing_stores(self, stores): """Add a list of stores to the importing list. Add stores to os_glance_importing_to_stores :param stores: A list of store names """ self.merge_store_list(self.IMPORTING_STORES_KEY, stores) def remove_importing_stores(self, stores): """Remove a list of stores from the importing list. Remove stores from os_glance_importing_to_stores :param stores: A list of store names """ self.merge_store_list(self.IMPORTING_STORES_KEY, stores, subtract=True) def add_failed_stores(self, stores): """Add a list of stores to the failed list. Add stores to os_glance_failed_import :param stores: A list of store names """ self.merge_store_list(self.IMPORT_FAILED_KEY, stores) def remove_failed_stores(self, stores): """Remove a list of stores from the failed list. Remove stores from os_glance_failed_import :param stores: A list of store names """ self.merge_store_list(self.IMPORT_FAILED_KEY, stores, subtract=True) def set_image_data(self, uri, task_id, backend, set_active, callback=None): """Populate image with data on a specific backend. This is used during an image import operation to populate the data in a given store for the image. If this object wraps an admin-capable image_repo, then this will be done with admin credentials on behalf of a user already determined to be able to perform this operation (such as a copy-image import of an existing image owned by another user). :param uri: Source URL for image data :param task_id: The task responsible for this operation :param backend: The backend store to target the data :param set_active: Whether or not to set the image to 'active' state after the operation completes :param callback: A callback function with signature: fn(action, chunk_bytes, total_bytes) which should be called while processing the image approximately every minute. 
""" if callback: callback = functools.partial(callback, self) return image_import.set_image_data(self._image, uri, task_id, backend=backend, set_active=set_active, callback=callback) def set_image_attribute(self, **attrs): """Set an image attribute. This allows setting various image attributes which will be saved upon exiting the ImportActionWrapper context. :param attrs: kwarg list of attributes to set on the image :raises: AttributeError if an attribute outside the set of allowed ones is present in attrs. """ allowed = ['status', 'disk_format', 'container_format', 'virtual_size', 'size'] for attr, value in attrs.items(): if attr not in allowed: raise AttributeError('Setting %s is not allowed' % attr) setattr(self._image, attr, value) def set_image_extra_properties(self, properties): """Merge values into image extra_properties. This allows a plugin to set additional properties on the image, as long as those are outside the reserved namespace. Any keys in the internal namespace will be dropped (and logged). :param properties: A dict of properties to be merged in """ for key, value in properties.items(): if key.startswith(api_common.GLANCE_RESERVED_NS): LOG.warning(('Dropping %(key)s=%(val)s during metadata ' 'injection for %(image)s'), {'key': key, 'val': value, 'image': self.image_id}) else: self._image.extra_properties[key] = value def remove_location_for_store(self, backend): """Remove a location from an image given a backend store. Given a backend store, remove the corresponding location from the image's set of locations. If the last location is removed, remove the image checksum, hash information, and size. :param backend: The backend store to remove from the image """ for i, location in enumerate(self._image.locations): if location.get('metadata', {}).get('store') == backend: try: self._image.locations.pop(i) except (store_exceptions.NotFound, store_exceptions.Forbidden): msg = (_("Error deleting from store %(store)s when " "reverting.") % {'store': backend}) LOG.warning(msg) # NOTE(yebinama): Some store drivers doesn't document which # exceptions they throw. except Exception: msg = (_("Unexpected exception when deleting from store " "%(store)s.") % {'store': backend}) LOG.warning(msg) else: if len(self._image.locations) == 0: self._image.checksum = None self._image.os_hash_algo = None self._image.os_hash_value = None self._image.size = None break def pop_extra_property(self, name): """Delete the named extra_properties value, if present. If the image.extra_properties dict contains the named key, delete it. :param name: The key to delete. """ self._image.extra_properties.pop(name, None) class _DeleteFromFS(task.Task): def __init__(self, task_id, task_type): self.task_id = task_id self.task_type = task_type super(_DeleteFromFS, self).__init__( name='%s-DeleteFromFS-%s' % (task_type, task_id)) def execute(self, file_path): """Remove file from the backend :param file_path: path to the file being deleted """ if CONF.enabled_backends: try: store_api.delete(file_path, 'os_glance_staging_store') except store_api.exceptions.NotFound as e: LOG.error(_("After upload to backend, deletion of staged " "image data from %(fn)s has failed because " "%(em)s"), {'fn': file_path, 'em': e.message}) else: # TODO(abhishekk): After removal of backend module from # glance_store need to change this to use multi_backend # module. 
file_path = file_path[7:] if os.path.exists(file_path): try: LOG.debug(_("After upload to the backend, deleting staged " "image data from %(fn)s"), {'fn': file_path}) os.unlink(file_path) except OSError as e: LOG.error(_("After upload to backend, deletion of staged " "image data from %(fn)s has failed because " "[Errno %(en)d]"), {'fn': file_path, 'en': e.errno}) else: LOG.warning(_("After upload to backend, deletion of staged " "image data has failed because " "it cannot be found at %(fn)s"), { 'fn': file_path}) class _ImageLock(task.Task): def __init__(self, task_id, task_type, action_wrapper): self.task_id = task_id self.task_type = task_type self.action_wrapper = action_wrapper super(_ImageLock, self).__init__( name='%s-ImageLock-%s' % (task_type, task_id)) def execute(self): self.action_wrapper.assert_task_lock() LOG.debug('Image %(image)s import task %(task)s lock confirmed', {'image': self.action_wrapper.image_id, 'task': self.task_id}) def revert(self, result, **kwargs): """Drop our claim on the image. If we have failed, we need to drop our import_task lock on the image so that something else can have a try. Note that we may have been preempted so we should only drop *our* lock. """ try: self.action_wrapper.drop_lock_for_task() except exception.NotFound: LOG.warning('Image %(image)s import task %(task)s lost its ' 'lock during execution!', {'image': self.action_wrapper.image_id, 'task': self.task_id}) else: LOG.debug('Image %(image)s import task %(task)s dropped ' 'its lock after failure', {'image': self.action_wrapper.image_id, 'task': self.task_id}) class _VerifyStaging(task.Task): # NOTE(jokke): This could be also for example "staging_path" but to # keep this compatible with other flows we want to stay consistent # with base_import default_provides = 'file_path' def __init__(self, task_id, task_type, task_repo, uri): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo self.uri = uri super(_VerifyStaging, self).__init__( name='%s-ConfigureStaging-%s' % (task_type, task_id)) # NOTE(jokke): If we want to use other than 'file' store in the # future, this is one thing that needs to change. try: uri.index('file:///', 0) except ValueError: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Value of node_staging_uri must be " " in format 'file://'") % {'task_id': self.task_id, 'task_type': self.task_type}) raise exception.BadTaskConfiguration(msg) if not CONF.enabled_backends: # NOTE(jokke): We really don't need the store for anything but # verifying that we actually can build the store will allow us to # fail the flow early with clear message why that happens. self._build_store() def _build_store(self): # TODO(abhishekk): After removal of backend module from glance_store # need to change this to use multi_backend module. # NOTE(jokke): If we want to use some other store for staging, we can # implement the logic more general here. For now this should do. # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're # forced to build our own config object, register the required options # (and by required I mean *ALL* of them, even the ones we don't want), # and create our own store instance by calling a private function. # This is certainly unfortunate but it's the best we can do until the # glance_store refactor is done. A good thing is that glance_store is # under our team's management and it gates on Glance so changes to # this API will (should?) break task's tests. 
conf = cfg.ConfigOpts() try: backend.register_opts(conf) except cfg.DuplicateOptError: pass conf.set_override('filesystem_store_datadir', CONF.node_staging_uri[7:], group='glance_store') # NOTE(flaper87): Do not even try to judge me for this... :( # With the glance_store refactor, this code will change, until # that happens, we don't have a better option and this is the # least worst one, IMHO. store = backend._load_store(conf, 'file') try: store.configure() except AttributeError: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Could not load the filesystem store") % {'task_id': self.task_id, 'task_type': self.task_type}) raise exception.BadTaskConfiguration(msg) def execute(self): """Test the backend store and return the 'file_path'""" return self.uri class _ImportToStore(task.Task): def __init__(self, task_id, task_type, task_repo, action_wrapper, uri, backend, all_stores_must_succeed, set_active): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo self.action_wrapper = action_wrapper self.uri = uri self.backend = backend self.all_stores_must_succeed = all_stores_must_succeed self.set_active = set_active self.last_status = 0 super(_ImportToStore, self).__init__( name='%s-ImportToStore-%s' % (task_type, task_id)) def execute(self, file_path=None): """Bringing the imported image to back end store :param file_path: path to the image file """ # NOTE(flaper87): Let's dance... and fall # # Unfortunately, because of the way our domain layers work and # the checks done in the FS store, we can't simply rename the file # and set the location. To do that, we'd have to duplicate the logic # of every and each of the domain factories (quota, location, etc) # and we'd also need to hack the FS store to prevent it from raising # a "duplication path" error. I'd rather have this task copying the # image bits one more time than duplicating all that logic. # # Since I don't think this should be the definitive solution, I'm # leaving the code below as a reference for what should happen here # once the FS store and domain code will be able to handle this case. # # if file_path is None: # image_import.set_image_data(image, self.uri, None) # return # NOTE(flaper87): Don't assume the image was stored in the # work_dir. Think in the case this path was provided by another task. # Also, lets try to neither assume things nor create "logic" # dependencies between this task and `_ImportToFS` # # base_path = os.path.dirname(file_path.split("file://")[-1]) # NOTE(flaper87): Hopefully just scenarios #3 and #4. I say # hopefully because nothing prevents the user to use the same # FS store path as a work dir # # image_path = os.path.join(base_path, image_id) # # if (base_path == CONF.glance_store.filesystem_store_datadir or # base_path in CONF.glance_store.filesystem_store_datadirs): # os.rename(file_path, image_path) # # image_import.set_image_data(image, image_path, None) # NOTE(jokke): The different options here are kind of pointless as we # will need the file path anyways for our delete workflow for now. # For future proofing keeping this as is. 
        with self.action_wrapper as action:
            self._execute(action, file_path)

    def _execute(self, action, file_path):
        self.last_status = timeutils.now()

        if action.image_status == "deleted":
            raise exception.ImportTaskError("Image has been deleted, aborting"
                                            " import.")
        try:
            action.set_image_data(file_path or self.uri,
                                  self.task_id, backend=self.backend,
                                  set_active=self.set_active,
                                  callback=self._status_callback)
        # NOTE(yebinama): set_image_data catches Exception and raises from
        # them. Can't be more specific on the exceptions caught.
        except Exception:
            if self.all_stores_must_succeed:
                raise
            msg = (_("%(task_id)s of %(task_type)s failed but since "
                     "all_stores_must_succeed is set to false, continue.") %
                   {'task_id': self.task_id, 'task_type': self.task_type})
            LOG.warning(msg)
            if self.backend is not None:
                action.add_failed_stores([self.backend])

        if self.backend is not None:
            action.remove_importing_stores([self.backend])

    def _status_callback(self, action, chunk_bytes, total_bytes):
        # NOTE(danms): Only log status every five minutes
        if timeutils.now() - self.last_status > 300:
            LOG.debug('Image import %(image_id)s copied %(copied)i MiB',
                      {'image_id': action.image_id,
                       'copied': total_bytes // units.Mi})
            self.last_status = timeutils.now()

        task = script_utils.get_task(self.task_repo, self.task_id)
        if task is None:
            LOG.error(
                'Status callback for task %(task)s found no task object!',
                {'task': self.task_id})
            raise exception.TaskNotFound(self.task_id)
        if task.status != 'processing':
            LOG.error('Task %(task)s expected "processing" status, '
                      'but found "%(status)s"; aborting.',
                      {'task': self.task_id, 'status': task.status})
            raise exception.TaskAbortedError()

        task.message = _('Copied %i MiB') % (total_bytes // units.Mi)
        self.task_repo.save(task)

    def revert(self, result, **kwargs):
        """Remove location from image in case of failure.

        :param result: taskflow result object
        """
        with self.action_wrapper as action:
            action.remove_location_for_store(self.backend)
            action.remove_importing_stores([self.backend])
            if isinstance(result, taskflow.types.failure.Failure):
                # We are the store that failed, so add us to the failed list
                action.add_failed_stores([self.backend])


class _VerifyImageState(task.Task):

    def __init__(self, task_id, task_type, action_wrapper, import_method):
        self.task_id = task_id
        self.task_type = task_type
        self.action_wrapper = action_wrapper
        self.import_method = import_method
        super(_VerifyImageState, self).__init__(
            name='%s-VerifyImageState-%s' % (task_type, task_id))

    def execute(self):
        """Verify that we have an active image."""
        with self.action_wrapper as action:
            if action.image_status != 'active':
                raise _NoStoresSucceeded(_('None of the uploads finished!'))

    def revert(self, result, **kwargs):
        """Set the image back to queued if this wasn't a copy-image job."""
        with self.action_wrapper as action:
            if self.import_method != 'copy-image':
                action.set_image_attribute(status='queued')


class _CompleteTask(task.Task):

    def __init__(self, task_id, task_type, task_repo, action_wrapper):
        self.task_id = task_id
        self.task_type = task_type
        self.task_repo = task_repo
        self.action_wrapper = action_wrapper
        super(_CompleteTask, self).__init__(
            name='%s-CompleteTask-%s' % (task_type, task_id))

    def _finish_task(self, task):
        try:
            task.succeed({'image_id': self.action_wrapper.image_id})
        except Exception as e:
            # Note: The message string contains Error in it to indicate
            # in the task.message that it's an error message for the user.
            # TODO(nikhil): need to bring back save_and_reraise_exception
            # when necessary
            log_msg = _LE("Task ID %(task_id)s failed. Error: %(exc_type)s: "
                          "%(e)s")
            LOG.exception(log_msg, {'exc_type': str(type(e)),
                                    'e': encodeutils.exception_to_unicode(e),
                                    'task_id': task.task_id})

            err_msg = _("Error: %(exc_type)s: %(e)s")
            task.fail(err_msg % {'exc_type': str(type(e)),
                                 'e': encodeutils.exception_to_unicode(e)})
        finally:
            self.task_repo.save(task)

    def _drop_lock(self):
        try:
            self.action_wrapper.drop_lock_for_task()
        except exception.NotFound:
            # NOTE(danms): This would be really bad, but there is probably
            # not much point in reverting all the way back if we got this
            # far. Log the carnage for forensics.
            LOG.error('Image %(image)s import task %(task)s did not hold the '
                      'lock upon completion!',
                      {'image': self.action_wrapper.image_id,
                       'task': self.task_id})

    def execute(self):
        """Finish the task flow."""
        task = script_utils.get_task(self.task_repo, self.task_id)
        if task is not None:
            self._finish_task(task)
        self._drop_lock()

        LOG.info(_LI("%(task_id)s of %(task_type)s completed"),
                 {'task_id': self.task_id, 'task_type': self.task_type})


class _ImportMetadata(task.Task):

    default_provides = 'image_size'

    def __init__(self, task_id, task_type, context, action_wrapper,
                 import_req):
        self.task_id = task_id
        self.task_type = task_type
        self.context = context
        self.action_wrapper = action_wrapper
        self.import_req = import_req
        self.props_to_copy = CONF.glance_download_properties.extra_properties

        # We store the properties that will be set in case we are reverting
        self.properties = {}
        self.old_properties = {}
        self.old_attributes = {}
        super(_ImportMetadata, self).__init__(
            name='%s-ImportMetadata-%s' % (task_type, task_id))

    def execute(self):
        try:
            glance_endpoint = utils.get_glance_endpoint(
                self.context,
                self.import_req['method']['glance_region'],
                self.import_req['method']['glance_service_interface'])
            glance_image_id = self.import_req['method']['glance_image_id']
            image_download_metadata_url = '%s/v2/images/%s' % (
                glance_endpoint, glance_image_id)

            LOG.info(_LI("Fetching glance image metadata from remote "
                         "host %s"), image_download_metadata_url)
            token = self.context.auth_token
            request = urllib.request.Request(image_download_metadata_url,
                                             headers={'X-Auth-Token': token})
            with urllib.request.urlopen(request) as payload:
                data = json.loads(payload.read().decode('utf-8'))

            if data.get('status') != 'active':
                raise _InvalidGlanceDownloadImageStatus(
                    _('Source image status should be active instead of %s')
                    % data['status'])

            for key, value in data.items():
                for metadata in self.props_to_copy:
                    if key.startswith(metadata):
                        self.properties[key] = value

            with self.action_wrapper as action:
                # Save the old properties in case we need to revert
                self.old_properties = action.image_extra_properties
                self.old_attributes = {
                    'container_format': action.image_container_format,
                    'disk_format': action.image_disk_format,
                }

                # Set disk_format and container_format attributes
                action.set_image_attribute(
                    disk_format=data['disk_format'],
                    container_format=data['container_format'])

                # Set extra properties
                if self.properties:
                    action.set_image_extra_properties(self.properties)

            try:
                return int(data['size'])
            except (ValueError, KeyError):
                raise exception.ImportTaskError(
                    _('Size attribute of remote image %s could not be '
                      'determined.'
% glance_image_id)) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error( "Task %(task_id)s failed with exception %(error)s", { "error": encodeutils.exception_to_unicode(e), "task_id": self.task_id }) def revert(self, result, **kwargs): """Revert the extra properties set and set the image in queued""" with self.action_wrapper as action: for image_property in self.properties: if image_property not in self.old_properties: action.pop_extra_property(image_property) action.set_image_extra_properties(self.old_properties) action.set_image_attribute(status='queued', **self.old_attributes) def assert_quota(context, task_repo, task_id, stores, action_wrapper, enforce_quota_fn, **enforce_kwargs): try: enforce_quota_fn(context, context.owner, **enforce_kwargs) except exception.LimitExceeded as e: with excutils.save_and_reraise_exception(): with action_wrapper as action: action.remove_importing_stores(stores) if action.image_status == 'importing': action.set_image_attribute(status='queued') action_wrapper.drop_lock_for_task() task = script_utils.get_task(task_repo, task_id) if task is None: LOG.error(_LE('Failed to find task %r to update after ' 'quota failure'), task_id) else: task.fail(str(e)) task_repo.save(task) def get_flow(**kwargs): """Return task flow :param task_id: Task ID :param task_type: Type of the task :param task_repo: Task repo :param image_repo: Image repository used :param image_id: ID of the Image to be processed :param uri: uri for the image file """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') task_repo = kwargs.get('task_repo') image_repo = kwargs.get('image_repo') admin_repo = kwargs.get('admin_repo') image_id = kwargs.get('image_id') import_req = kwargs.get('import_req') import_method = import_req['method']['name'] uri = import_req['method'].get('uri') stores = kwargs.get('backend', [None]) all_stores_must_succeed = import_req.get( 'all_stores_must_succeed', True) context = kwargs.get('context') separator = '' if not CONF.enabled_backends and not CONF.node_staging_uri.endswith('/'): separator = '/' # Instantiate an action wrapper with the admin repo if we got one, # otherwise with the regular repo. action_wrapper = ImportActionWrapper(admin_repo or image_repo, image_id, task_id) kwargs['action_wrapper'] = action_wrapper if not uri and import_method in ['glance-direct', 'copy-image']: if CONF.enabled_backends: separator, staging_dir = store_utils.get_dir_separator() uri = separator.join((staging_dir, str(image_id))) else: uri = separator.join((CONF.node_staging_uri, str(image_id))) flow = lf.Flow(task_type, retry=retry.AlwaysRevert()) flow.add(_ImageLock(task_id, task_type, action_wrapper)) if import_method in ['web-download', 'copy-image', 'glance-download']: if import_method == 'glance-download': flow.add(_ImportMetadata(task_id, task_type, context, action_wrapper, import_req)) internal_plugin = internal_plugins.get_import_plugin(**kwargs) flow.add(internal_plugin) if CONF.enabled_backends: separator, staging_dir = store_utils.get_dir_separator() file_uri = separator.join((staging_dir, str(image_id))) else: file_uri = separator.join((CONF.node_staging_uri, str(image_id))) else: file_uri = uri flow.add(_VerifyStaging(task_id, task_type, task_repo, file_uri)) # Note(jokke): The plugins were designed to act on the image data or # metadata during the import process before the image goes active. It # does not make sense to try to execute them during 'copy-image'. 
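    # A hedged sketch of the linear flow assembled in this function for a
    # two-store 'web-download' import (names abbreviated; the real task
    # names are built from task_type and task_id):
    #
    #   _ImageLock -> _WebDownload -> _VerifyStaging
    #   -> import plugins (skipped for 'copy-image')
    #   -> _ImportToStore(store1) -> _ImportToStore(store2)
    #   -> _DeleteFromFS -> _VerifyImageState -> _CompleteTask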
if import_method != 'copy-image': for plugin in import_plugins.get_import_plugins(**kwargs): flow.add(plugin) else: LOG.debug("Skipping plugins on 'copy-image' job.") for idx, store in enumerate(stores, 1): set_active = (not all_stores_must_succeed) or (idx == len(stores)) if import_method == 'copy-image': set_active = False task_name = task_type + "-" + (store or "") import_task = lf.Flow(task_name) import_to_store = _ImportToStore(task_id, task_name, task_repo, action_wrapper, file_uri, store, all_stores_must_succeed, set_active) import_task.add(import_to_store) flow.add(import_task) delete_task = lf.Flow(task_type).add(_DeleteFromFS(task_id, task_type)) flow.add(delete_task) verify_task = _VerifyImageState(task_id, task_type, action_wrapper, import_method) flow.add(verify_task) complete_task = _CompleteTask(task_id, task_type, task_repo, action_wrapper) flow.add(complete_task) with action_wrapper as action: if import_method != 'copy-image': action.set_image_attribute(status='importing') image_size = (action.image_size or 0) // units.Mi action.add_importing_stores(stores) action.remove_failed_stores(stores) action.pop_extra_property('os_glance_stage_host') # After we have marked the image as intended, check quota to make # sure we are not over a limit, otherwise we roll back. if import_method == 'glance-direct': # We know the size of the image in staging, so we can check # against available image_size_total quota. assert_quota(kwargs['context'], task_repo, task_id, stores, action_wrapper, ks_quota.enforce_image_size_total, delta=image_size) elif import_method in ('copy-image', 'web-download', 'glance-download'): # The copy-image, web-download and glance-download methods will use # staging space to do their work, so check that quota. assert_quota(kwargs['context'], task_repo, task_id, stores, action_wrapper, ks_quota.enforce_image_staging_total, delta=image_size) assert_quota(kwargs['context'], task_repo, task_id, stores, action_wrapper, ks_quota.enforce_image_count_uploading) return flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/base_import.py0000664000175000017500000005137500000000000021606 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import os import glance_store as store_api from glance_store import backend from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from stevedore import named from taskflow.patterns import linear_flow as lf from taskflow import retry from taskflow import task from taskflow.types import failure from glance.async_ import utils from glance.common import exception from glance.common.scripts.image_import import main as image_import from glance.common.scripts import utils as script_utils from glance.i18n import _, _LE, _LI LOG = logging.getLogger(__name__) CONF = cfg.CONF class _CreateImage(task.Task): default_provides = 'image_id' def __init__(self, task_id, task_type, task_repo, image_repo, image_factory): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory super(_CreateImage, self).__init__( name='%s-CreateImage-%s' % (task_type, task_id)) def execute(self): task = script_utils.get_task(self.task_repo, self.task_id) if task is None: return task_input = script_utils.unpack_task_input(task) image = image_import.create_image( self.image_repo, self.image_factory, task_input.get('image_properties'), self.task_id) LOG.debug("Task %(task_id)s created image %(image_id)s", {'task_id': task.task_id, 'image_id': image.image_id}) return image.image_id def revert(self, *args, **kwargs): # TODO(NiallBunting): Deleting the image like this could be considered # a brute force way of reverting images. It may be worth checking if # data has been written. result = kwargs.get('result', None) if result is not None: if kwargs.get('flow_failures', None) is not None: image = self.image_repo.get(result) LOG.debug("Deleting image whilst reverting.") image.delete() self.image_repo.remove(image) class _ImportToFS(task.Task): default_provides = 'file_path' def __init__(self, task_id, task_type, task_repo, uri): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo self.uri = uri super(_ImportToFS, self).__init__( name='%s-ImportToFS-%s' % (task_type, task_id)) # NOTE(abhishekk): Use reserved 'os_glance_tasks_store' for tasks, # the else part will be removed once old way of configuring store # is deprecated. if CONF.enabled_backends: self.store = store_api.get_store_from_store_identifier( 'os_glance_tasks_store') else: if CONF.task.work_dir is None: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Missing work dir: %(work_dir)s") % {'task_id': self.task_id, 'task_type': self.task_type, 'work_dir': CONF.task.work_dir}) raise exception.BadTaskConfiguration(msg) self.store = self._build_store() def _build_store(self): # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're # forced to build our own config object, register the required options # (and by required I mean *ALL* of them, even the ones we don't want), # and create our own store instance by calling a private function. # This is certainly unfortunate but it's the best we can do until the # glance_store refactor is done. A good thing is that glance_store is # under our team's management and it gates on Glance so changes to # this API will (should?) break task's tests. conf = cfg.ConfigOpts() backend.register_opts(conf) conf.set_override('filesystem_store_datadir', CONF.task.work_dir, group='glance_store') # NOTE(flaper87): Do not even try to judge me for this... 
:( # With the glance_store refactor, this code will change, until # that happens, we don't have a better option and this is the # least worst one, IMHO. store = backend._load_store(conf, 'file') if store is None: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Could not load the filesystem store") % {'task_id': self.task_id, 'task_type': self.task_type}) raise exception.BadTaskConfiguration(msg) store.configure() return store def execute(self, image_id): """Create temp file into store and return path to it :param image_id: Glance Image ID """ # NOTE(flaper87): We've decided to use a separate `work_dir` for # this task - and tasks coming after this one - as a way to expect # users to configure a local store for pre-import works on the image # to happen. # # While using any path should be "technically" fine, it's not what # we recommend as the best solution. For more details on this, please # refer to the comment in the `_ImportToStore.execute` method. data = script_utils.get_image_data_iter(self.uri) path = self.store.add(image_id, data, 0, context=None)[0] try: # NOTE(flaper87): Consider moving this code to a common # place that other tasks can consume as well. stdout, stderr = putils.trycmd('qemu-img', 'info', '--output=json', path, prlimit=utils.QEMU_IMG_PROC_LIMITS, log_errors=putils.LOG_ALL_ERRORS) except OSError as exc: with excutils.save_and_reraise_exception(): exc_message = encodeutils.exception_to_unicode(exc) msg = _LE('Failed to execute security checks on the image ' '%(task_id)s: %(exc)s') LOG.error(msg, {'task_id': self.task_id, 'exc': exc_message}) metadata = json.loads(stdout) backing_file = metadata.get('backing-filename') if backing_file is not None: msg = _("File %(path)s has invalid backing file " "%(bfile)s, aborting.") % {'path': path, 'bfile': backing_file} raise RuntimeError(msg) try: data_file = metadata['format-specific']['data']['data-file'] except KeyError: data_file = None if data_file is not None: msg = _("File %(path)s has invalid data-file " "%(dfile)s, aborting.") % {"path": path, "dfile": data_file} raise RuntimeError(msg) return path def revert(self, image_id, result, **kwargs): if isinstance(result, failure.Failure): LOG.exception(_LE('Task: %(task_id)s failed to import image ' '%(image_id)s to the filesystem.'), {'task_id': self.task_id, 'image_id': image_id}) return if os.path.exists(result.split("file://")[-1]): if CONF.enabled_backends: store_api.delete(result, 'os_glance_tasks_store') else: store_api.delete_from_backend(result) class _DeleteFromFS(task.Task): def __init__(self, task_id, task_type): self.task_id = task_id self.task_type = task_type super(_DeleteFromFS, self).__init__( name='%s-DeleteFromFS-%s' % (task_type, task_id)) def execute(self, file_path): """Remove file from the backend :param file_path: path to the file being deleted """ if CONF.enabled_backends: store_api.delete(file_path, 'os_glance_tasks_store') else: store_api.delete_from_backend(file_path) class _ImportToStore(task.Task): def __init__(self, task_id, task_type, image_repo, uri, backend): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.uri = uri self.backend = backend super(_ImportToStore, self).__init__( name='%s-ImportToStore-%s' % (task_type, task_id)) def execute(self, image_id, file_path=None): """Bringing the introspected image to back end store :param image_id: Glance Image ID :param file_path: path to the image file """ # NOTE(flaper87): There are a couple of interesting bits in the # interaction between this 
task and the `_ImportToFS` one. I'll try # to cover them in this comment. # # NOTE(flaper87): # `_ImportToFS` downloads the image to a dedicated `work_dir` which # needs to be configured in advance (please refer to the config option # docs for more info). The motivation behind this is also explained in # the `_ImportToFS.execute` method. # # Due to the fact that we have an `_ImportToFS` task which downloads # the image data already, we need to be as smart as we can in this task # to avoid downloading the data several times and reducing the copy or # write times. There are several scenarios where the interaction # between this task and `_ImportToFS` could be improved. All these # scenarios assume the `_ImportToFS` task has been executed before # and/or in a more abstract scenario, that `file_path` is being # provided. # # Scenario 1: FS Store is Remote, introspection enabled, # conversion disabled # # In this scenario, the user would benefit from having the scratch path # being the same path as the fs store. Only one write would happen and # an extra read will happen in order to introspect the image. Note that # this read is just for the image headers and not the entire file. # # Scenario 2: FS Store is remote, introspection enabled, # conversion enabled # # In this scenario, the user would benefit from having a *local* store # into which the image can be converted. This will require downloading # the image locally, converting it and then copying the converted image # to the remote store. # # Scenario 3: FS Store is local, introspection enabled, # conversion disabled # Scenario 4: FS Store is local, introspection enabled, # conversion enabled # # In both these scenarios the user shouldn't care if the FS # store path and the work dir are the same, therefore probably # benefit, about the scratch path and the FS store being the # same from a performance perspective. Space wise, regardless # of the scenario, the user will have to account for it in # advance. # # Lets get to it and identify the different scenarios in the # implementation image = self.image_repo.get(image_id) image.status = 'saving' self.image_repo.save(image) # NOTE(flaper87): Let's dance... and fall # # Unfortunately, because of the way our domain layers work and # the checks done in the FS store, we can't simply rename the file # and set the location. To do that, we'd have to duplicate the logic # of every and each of the domain factories (quota, location, etc) # and we'd also need to hack the FS store to prevent it from raising # a "duplication path" error. I'd rather have this task copying the # image bits one more time than duplicating all that logic. # # Since I don't think this should be the definitive solution, I'm # leaving the code below as a reference for what should happen here # once the FS store and domain code will be able to handle this case. # # if file_path is None: # image_import.set_image_data(image, self.uri, None) # return # NOTE(flaper87): Don't assume the image was stored in the # work_dir. Think in the case this path was provided by another task. # Also, lets try to neither assume things nor create "logic" # dependencies between this task and `_ImportToFS` # # base_path = os.path.dirname(file_path.split("file://")[-1]) # NOTE(flaper87): Hopefully just scenarios #3 and #4. 
I say # hopefully because nothing prevents the user to use the same # FS store path as a work dir # # image_path = os.path.join(base_path, image_id) # # if (base_path == CONF.glance_store.filesystem_store_datadir or # base_path in CONF.glance_store.filesystem_store_datadirs): # os.rename(file_path, image_path) # # image_import.set_image_data(image, image_path, None) try: image_import.set_image_data(image, file_path or self.uri, self.task_id, backend=self.backend) except IOError as e: msg = (_('Uploading the image failed due to: %(exc)s') % {'exc': encodeutils.exception_to_unicode(e)}) LOG.error(msg) raise exception.UploadException(message=msg) # NOTE(flaper87): We need to save the image again after the locations # have been set in the image. self.image_repo.save(image) class _SaveImage(task.Task): def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_SaveImage, self).__init__( name='%s-SaveImage-%s' % (task_type, task_id)) def execute(self, image_id): """Transition image status to active :param image_id: Glance Image ID """ new_image = self.image_repo.get(image_id) if new_image.status == 'saving': # NOTE(flaper87): THIS IS WRONG! # we should be doing atomic updates to avoid # race conditions. This happens in other places # too. new_image.status = 'active' self.image_repo.save(new_image) class _CompleteTask(task.Task): def __init__(self, task_id, task_type, task_repo): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo super(_CompleteTask, self).__init__( name='%s-CompleteTask-%s' % (task_type, task_id)) def execute(self, image_id): """Finishing the task flow :param image_id: Glance Image ID """ task = script_utils.get_task(self.task_repo, self.task_id) if task is None: return try: task.succeed({'image_id': image_id}) except Exception as e: # Note: The message string contains Error in it to indicate # in the task.message that it's a error message for the user. # TODO(nikhil): need to bring back save_and_reraise_exception when # necessary log_msg = _LE("Task ID %(task_id)s failed. Error: %(exc_type)s: " "%(e)s") LOG.exception(log_msg, {'exc_type': str(type(e)), 'e': encodeutils.exception_to_unicode(e), 'task_id': task.task_id}) err_msg = _("Error: %(exc_type)s: %(e)s") task.fail(err_msg % {'exc_type': str(type(e)), 'e': encodeutils.exception_to_unicode(e)}) finally: self.task_repo.save(task) LOG.info(_LI("%(task_id)s of %(task_type)s completed"), {'task_id': self.task_id, 'task_type': self.task_type}) def _get_import_flows(**kwargs): # NOTE(flaper87): Until we have a better infrastructure to enable # and disable tasks plugins, hard-code the tasks we know exist, # instead of loading everything from the namespace. This guarantees # both, the load order of these plugins and the fact that no random # plugins will be added/loaded until we feel comfortable with this. # Future patches will keep using NamedExtensionManager but they'll # rely on a config option to control this process. 
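    # Only the three plugin names below are loaded, in this order, from
    # the 'glance.flows.import' entry point namespace; name_order=True
    # makes stevedore honor the order of the names list rather than
    # discovery order.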
extensions = named.NamedExtensionManager('glance.flows.import', names=['ovf_process', 'convert', 'introspect'], name_order=True, invoke_on_load=True, invoke_kwds=kwargs) for ext in extensions.extensions: yield ext.obj def get_flow(**kwargs): """Return task flow :param task_id: Task ID :param task_type: Type of the task :param task_repo: Task repo :param image_repo: Image repository used :param image_factory: Glance Image Factory :param uri: uri for the image file """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') task_repo = kwargs.get('task_repo') image_repo = kwargs.get('image_repo') image_factory = kwargs.get('image_factory') uri = kwargs.get('uri') backend = kwargs.get('backend') flow = lf.Flow(task_type, retry=retry.AlwaysRevert()).add( _CreateImage(task_id, task_type, task_repo, image_repo, image_factory)) import_to_store = _ImportToStore(task_id, task_type, image_repo, uri, backend) try: # NOTE(flaper87): ImportToLocal and DeleteFromLocal shouldn't be here. # Ideally, we should have the different import flows doing this for us # and this function should clean up duplicated tasks. For example, say # 2 flows need to have a local copy of the image - ImportToLocal - in # order to be able to complete the task - i.e Introspect-. In that # case, the introspect.get_flow call should add both, ImportToLocal and # DeleteFromLocal, to the flow and this function will reduce the # duplicated calls to those tasks by creating a linear flow that # ensures those are called before the other tasks. For now, I'm # keeping them here, though. limbo = lf.Flow(task_type).add(_ImportToFS(task_id, task_type, task_repo, uri)) for subflow in _get_import_flows(**kwargs): limbo.add(subflow) # NOTE(flaper87): We have hard-coded 2 tasks, # if there aren't more than 2, it means that # no subtask has been registered. if len(limbo) > 1: flow.add(limbo) # NOTE(flaper87): Until this implementation gets smarter, # make sure ImportToStore is called *after* the imported # flow stages. If not, the image will be set to saving state # invalidating tasks like Introspection or Convert. flow.add(import_to_store) # NOTE(flaper87): Since this is an "optional" task but required # when `limbo` is executed, we're adding it in its own subflow # to isolate it from the rest of the flow. delete_flow = lf.Flow(task_type).add(_DeleteFromFS(task_id, task_type)) flow.add(delete_flow) else: flow.add(import_to_store) except exception.BadTaskConfiguration as exc: # NOTE(flaper87): If something goes wrong with the load of # import tasks, make sure we go on. LOG.error(_LE('Bad task configuration: %s'), exc.message) flow.add(import_to_store) flow.add( _SaveImage(task_id, task_type, image_repo), _CompleteTask(task_id, task_type, task_repo) ) return flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/convert.py0000664000175000017500000001351200000000000020751 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import linear_flow as lf from taskflow import task from glance.i18n import _, _LW LOG = logging.getLogger(__name__) convert_task_opts = [ # NOTE: This configuration option requires the operator to explicitly set # an image conversion format. There being no sane default due to the # dependency on the environment in which OpenStack is running, we do not # mark this configuration option as "required". Rather a warning message # is given to the operator, prompting for an image conversion format to # be set. cfg.StrOpt('conversion_format', sample_default='raw', choices=('qcow2', 'raw', 'vmdk'), help=_(""" Set the desired image conversion format. Provide a valid image format to which you want images to be converted before they are stored for consumption by Glance. Appropriate image format conversions are desirable for specific storage backends in order to facilitate efficient handling of bandwidth and usage of the storage infrastructure. By default, ``conversion_format`` is not set and must be set explicitly in the configuration file. The allowed values for this option are ``raw``, ``qcow2`` and ``vmdk``. The ``raw`` format is the unstructured disk format and should be chosen when RBD or Ceph storage backends are used for image storage. ``qcow2`` is supported by the QEMU emulator that expands dynamically and supports Copy on Write. The ``vmdk`` is another common disk format supported by many common virtual machine monitors like VMWare Workstation. Possible values: * qcow2 * raw * vmdk Related options: * disk_formats """)), ] CONF = cfg.CONF # NOTE(flaper87): Registering under the taskflow_executor section # for now. It seems a waste to have a whole section dedicated to a # single task with a single option. CONF.register_opts(convert_task_opts, group='taskflow_executor') class _Convert(task.Task): conversion_missing_warned = False def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_Convert, self).__init__( name='%s-Convert-%s' % (task_type, task_id)) def execute(self, image_id, file_path): # NOTE(flaper87): A format must be explicitly # specified. There's no "sane" default for this # because the dest format may work differently depending # on the environment OpenStack is running in. abs_file_path = file_path.split("file://")[-1] conversion_format = CONF.taskflow_executor.conversion_format if conversion_format is None: if not _Convert.conversion_missing_warned: msg = _LW('The conversion format is None, please add a value ' 'for it in the config file for this task to ' 'work: %s') LOG.warning(msg, self.task_id) _Convert.conversion_missing_warned = True return image_obj = self.image_repo.get(image_id) src_format = image_obj.disk_format # TODO(flaper87): Check whether the image is in the desired # format already. Probably using `qemu-img` just like the # `Introspection` task. # NOTE(hemanthm): We add '-f' parameter to the convert command here so # that the image format need not be inferred by qemu utils. This # shields us from being vulnerable to an attack vector described here # https://bugs.launchpad.net/glance/+bug/1449062 data_dir = CONF.task.work_dir # NOTE(abhishekk): Use reserved 'os_glance_tasks_store' for tasks. 
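        # A hedged example of the conversion command assembled below for a
        # qcow2 source being converted to raw (paths illustrative only):
        #
        #   qemu-img convert -f qcow2 -O raw \
        #       /work_dir/<image_id> /work_dir/<image_id>.converted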
if CONF.enabled_backends: data_dir = getattr( CONF, 'os_glance_tasks_store').filesystem_store_datadir dest_path = os.path.join(data_dir, "%s.converted" % image_id) stdout, stderr = putils.trycmd('qemu-img', 'convert', '-f', src_format, '-O', conversion_format, file_path, dest_path, log_errors=putils.LOG_ALL_ERRORS) if stderr: raise RuntimeError(stderr) os.unlink(abs_file_path) os.rename(dest_path, abs_file_path) return file_path def revert(self, image_id, result=None, **kwargs): # NOTE(flaper87): If result is None, it probably # means this task failed. Otherwise, we would have # a result from its execution. if result is None: return fs_path = result.split("file://")[-1] if os.path.exists(fs_path): os.remove(fs_path) def get_flow(**kwargs): """Return task flow for converting images to different formats. :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') return lf.Flow(task_type).add( _Convert(task_id, task_type, image_repo), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/introspect.py0000664000175000017500000000673600000000000021475 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from taskflow.patterns import linear_flow as lf from glance.async_ import utils from glance.i18n import _LE LOG = logging.getLogger(__name__) class _Introspect(utils.OptionalTask): """Taskflow to pull the embedded metadata out of image file""" def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_Introspect, self).__init__( name='%s-Introspect-%s' % (task_type, task_id)) def execute(self, image_id, file_path): """Does the actual introspection :param image_id: Glance image ID :param file_path: Path to the file being introspected """ try: stdout, stderr = putils.trycmd('qemu-img', 'info', '--output=json', file_path, prlimit=utils.QEMU_IMG_PROC_LIMITS, log_errors=putils.LOG_ALL_ERRORS) except OSError as exc: # NOTE(flaper87): errno == 2 means the executable file # was not found. For now, log an error and move forward # until we have a better way to enable/disable optional # tasks. 
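# errno 2 is errno.ENOENT ("No such file or directory"), i.e. the
# 'qemu-img' binary itself is absent from the host. Only that case is
# swallowed below; any other OSError is logged and re-raised.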
if exc.errno != 2: with excutils.save_and_reraise_exception(): exc_message = encodeutils.exception_to_unicode(exc) msg = _LE('Failed to execute introspection ' '%(task_id)s: %(exc)s') LOG.error(msg, {'task_id': self.task_id, 'exc': exc_message}) return if stderr: raise RuntimeError(stderr) metadata = json.loads(stdout) new_image = self.image_repo.get(image_id) new_image.virtual_size = metadata.get('virtual-size', 0) new_image.disk_format = metadata.get('format') self.image_repo.save(new_image) LOG.debug("%(task_id)s: Introspection successful: %(file)s", {'task_id': self.task_id, 'file': file_path}) return new_image def get_flow(**kwargs): """Return task flow for introspecting images to obtain metadata about the image. :param task_id: Task ID :param task_type: Type of the task. :param image_repo: Image repository used. """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s", {'task_type': task_type, 'id': task_id, 'repo': image_repo}) return lf.Flow(task_type).add( _Introspect(task_id, task_type, image_repo), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/location_import.py0000664000175000017500000003162400000000000022477 0ustar00zuulzuul00000000000000# Copyright 2024 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
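# A rough sketch of the flow this module assembles (the task names are
# the classes defined below; the exact ordering depends on
# CONF.do_secure_hash and on any user-supplied validation data):
#
#     _ImageLock -> _UpdateLocationTask -> [_CalculateHash ->
#         _VerifyValidationData] -> _SetImageToActiveTask -> _CompleteTask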
import hashlib import glance_store as store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import secretutils from taskflow.patterns import linear_flow as lf from taskflow import retry from taskflow import task import glance.async_.flows.api_image_import as image_import from glance.common import exception from glance.common import store_utils from glance.i18n import _, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF class _HashCalculationFailed(exception.GlanceException): def __init__(self, message): super(_HashCalculationFailed, self).__init__(message) class _InvalidLocation(exception.GlanceException): def __init__(self, message): super(_InvalidLocation, self).__init__(message) class _CalculateHash(task.Task): def __init__(self, task_id, task_type, image_repo, image_id, hashing_algo, status=None): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = image_id self.hashing_algo = hashing_algo self.image_status = status super(_CalculateHash, self).__init__( name='%s-CalculateHash-%s' % (task_type, task_id)) def _calculate_hash(self, image): current_os_hash_value = hashlib.new(self.hashing_algo) current_checksum = secretutils.md5(usedforsecurity=False) for chunk in image.get_data(): if chunk is None: break current_checksum.update(chunk) current_os_hash_value.update(chunk) image.checksum = current_checksum.hexdigest() image.os_hash_value = current_os_hash_value.hexdigest() def _set_checksum_and_hash(self, image): retries = 0 while retries <= CONF.http_retries and image.os_hash_value is None: retries += 1 try: self._calculate_hash(image) self.image_repo.save(image) except IOError as e: LOG.debug('[%i/%i] Hash calculation failed due to %s', retries, CONF.http_retries, encodeutils.exception_to_unicode(e)) if retries == CONF.http_retries: if image.status != 'active': # NOTE(pdeore): The image location add operation # should succeed so this exception should be raised # only when image status is not active. msg = (_('Hash calculation failed for image %s ' 'data') % self.image_id) raise _HashCalculationFailed(msg) else: msg = (_LW("Hash calculation failed for image %s " "data") % self.image_id) LOG.warning(msg) except store.exceptions.NotFound: # NOTE(pdeore): This can happen if image delete attempted # when hash calculation is in progress, which deletes the # image data from backend(specially rbd) but image remains # in 'active' state. # see: https://bugs.launchpad.net/glance/+bug/2045769 # Once this ceph side issue is fixed, we'll keep only the # warning message here and will remove the deletion part # which is a temporary workaround. 
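# The statements below are that temporary workaround: log the
# condition, delete the now-dataless image record, and stop retrying.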
LOG.debug(_('Failed to calculate checksum of %(image_id)s ' 'as image data has been deleted from the ' 'backend'), {'image_id': self.image_id}) image.delete() self.image_repo.remove(image) break def execute(self): image = self.image_repo.get(self.image_id) if image.status == 'queued': image.status = self.image_status image.os_hash_algo = self.hashing_algo self.image_repo.save(image) self._set_checksum_and_hash(image) def revert(self, result, **kwargs): """Set os_hash_algo to None when hash calculation fails and remove the location by reverting the image to queued state """ try: image = self.image_repo.get(self.image_id) if image.status == 'importing': if not image.locations[0]['url'].startswith("http"): # NOTE(pdeore): `http` store doesn't allow deletion of # location: image.locations.pop() image.status = 'queued' image.os_hash_algo = None self.image_repo.save(image) except exception.NotFound: LOG.debug("Image %s might have been deleted from the backend", self.image_id) class _VerifyValidationData(task.Task): def __init__(self, task_id, task_type, image_repo, image_id, val_data): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = image_id self.val_data = val_data super(_VerifyValidationData, self).__init__( name='%s-VerifyValidationData-%s' % (task_type, task_id)) def execute(self): """Verify the validation data against the calculated hash :param image_id: Glance Image ID :val_data: Validation data provided by user """ image = self.image_repo.get(self.image_id) if self.val_data['os_hash_value'] != image.os_hash_value: msg = (_("os_hash_value: (%s) does not match the actual " "os_hash_value: (%s)") % ( self.val_data['os_hash_value'], image.os_hash_value)) raise exception.InvalidParameterValue(msg) def revert(self, result, **kwargs): """Set image status back to queued and set the hash values to None """ try: image = self.image_repo.get(self.image_id) if not image.locations[0]['url'].startswith("http"): # NOTE(pdeore): `http` store doesn't allow deletion of # location image.locations.pop() image.status = 'queued' image.os_hash_algo = None image.os_hash_value = None image.checksum = None self.image_repo.save(image) except exception.NotFound: LOG.debug("Image %s might have been deleted from the backend", self.image_id) class _SetHashValues(task.Task): def __init__(self, task_id, task_type, image_repo, image_id, val_data): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = image_id self.val_data = val_data super(_SetHashValues, self).__init__( name='%s-SetHashValues-%s' % (task_type, task_id)) def execute(self): """Set the user-provided hash algorithm and hash value properties on the image when do_secure_hash is False.
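For example (illustrative values only), a val_data of {'os_hash_algo': 'sha512', 'os_hash_value': '<hex digest>'} is stamped onto the image as-is, with nothing recomputed.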
:param image_id: Glance Image ID :val_data: Validation Data provided by user """ image = self.image_repo.get(self.image_id) for k, v in self.val_data.items(): setattr(image, k, v) self.image_repo.save(image) class _UpdateLocationTask(task.Task): def __init__(self, task_id, task_type, image_repo, image_id, url, context): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = image_id self.url = url self.context = context super(_UpdateLocationTask, self).__init__( name='%s-UpdateLocationTask-%s' % (task_type, task_id)) def execute(self): """Update the image location :param image_id: Glance Image ID :param url: Location URL """ image = self.image_repo.get(self.image_id) try: # (NOTE(pdeore): Add metadata key to add the store identifier # as location metadata updated_location = { 'url': self.url, 'metadata': {}, } if CONF.enabled_backends: updated_location = store_utils.get_updated_store_location( [updated_location], context=self.context)[0] image.locations.append(updated_location) self.image_repo.save(image) except (exception.Invalid, exception.BadStoreUri) as e: raise _InvalidLocation(e.msg) class _SetImageToActiveTask(task.Task): def __init__(self, task_id, task_type, image_repo, image_id): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = image_id super(_SetImageToActiveTask, self).__init__( name='%s-SetImageToActiveTask-%s' % (task_type, task_id)) def execute(self): """Set Image status to Active :param image_id: Glance Image ID """ image = self.image_repo.get(self.image_id) image.status = 'active' self.image_repo.save(image) def revert(self, result, **kwargs): """Set image status back to queued and remove the location if it's added. """ try: image = self.image_repo.get(self.image_id) if image.status != 'active': if not image.locations[0]['url'].startswith("http"): # NOTE(pdeore): `http` store doesn't allow deletion of # location image.locations.pop() if image.status == 'importing': image.status = 'queued' self.image_repo.save(image) except exception.NotFound: LOG.debug("Image %s might have been deleted from the backend", self.image_id) def get_flow(**kwargs): """Return task flow :param task_id: Task ID :param task_type: Type of the task :param task_repo: Task repo :param image_repo: Image repository used :param image_id: ID of the Image to be processed """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') task_repo = kwargs.get('task_repo') image_repo = kwargs.get('image_repo') admin_repo = kwargs.get('admin_repo') image_id = kwargs.get('image_id') val_data = kwargs.get('val_data', {}) loc_url = kwargs.get('loc_url') context = kwargs.get('context') hashing_algo = val_data.get("os_hash_algo", CONF['hashing_algorithm']) # Instantiate an action wrapper with the admin repo if we got one, # otherwise with the regular repo. 
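# Decision table for the tasks added below (all names are the task
# classes from this module):
#
#     do_secure_hash, val_data    -> _CalculateHash, _VerifyValidationData,
#                                    then _SetImageToActiveTask
#     do_secure_hash, no val_data -> _SetImageToActiveTask first, then
#                                    _CalculateHash (hashes fill in late)
#     no do_secure_hash, val_data -> _SetHashValues, _SetImageToActiveTask
#     neither                     -> _SetImageToActiveTask only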
action_wrapper = image_import.ImportActionWrapper( admin_repo or image_repo, image_id, task_id) kwargs['action_wrapper'] = action_wrapper flow = lf.Flow(task_type, retry=retry.AlwaysRevert()) flow.add(image_import._ImageLock(task_id, task_type, action_wrapper)) flow.add( _UpdateLocationTask(task_id, task_type, image_repo, image_id, loc_url, context)) if CONF.do_secure_hash: if val_data: flow.add( _CalculateHash(task_id, task_type, image_repo, image_id, hashing_algo, status='importing')) flow.add( _VerifyValidationData(task_id, task_type, image_repo, image_id, val_data)) flow.add( _SetImageToActiveTask(task_id, task_type, image_repo, image_id)) else: flow.add( _SetImageToActiveTask( task_id, task_type, image_repo, image_id)) flow.add( _CalculateHash(task_id, task_type, image_repo, image_id, hashing_algo)) elif val_data: flow.add( _SetHashValues(task_id, task_type, image_repo, image_id, val_data)) flow.add( _SetImageToActiveTask(task_id, task_type, image_repo, image_id)) else: flow.add( _SetImageToActiveTask(task_id, task_type, image_repo, image_id)) flow.add( image_import._CompleteTask(task_id, task_type, task_repo, action_wrapper)) return flow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/ovf_process.py0000664000175000017500000002537600000000000021634 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import shutil import tarfile import urllib from defusedxml import ElementTree as etree from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from taskflow.patterns import linear_flow as lf from taskflow import task from glance.i18n import _, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF # Define the CIM namespaces here. Currently we will be supporting extracting # properties only from CIM_ProcessorAllocationSettingData CIM_NS = {'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' 'CIM_ProcessorAllocationSettingData': 'cim_pasd'} class _OVF_Process(task.Task): """ Extracts the single disk image from an OVA tarball and saves it to the Glance image store. It also parses the included OVF file for selected metadata which it then saves in the image store as the previously saved image's properties. """ default_provides = 'file_path' def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_OVF_Process, self).__init__( name='%s-OVF_Process-%s' % (task_type, task_id)) def _get_extracted_file_path(self, image_id): file_path = CONF.task.work_dir # NOTE(abhishekk): Use reserved 'os_glance_tasks_store' for tasks. 
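# For illustration: with a work_dir of /var/lib/glance/tasks (a made-up
# value), the disk extracted from the OVA lands at
# /var/lib/glance/tasks/<image_id>.extracted before execute() renames
# it over the original package file.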
if CONF.enabled_backends: file_path = getattr( CONF, 'os_glance_tasks_store').filesystem_store_datadir return os.path.join(file_path, "%s.extracted" % image_id) def _get_ova_iter_objects(self, uri): """Returns an iterable object for either a local file or a URI :param uri: uri (remote or local) to the ova package we want to iterate """ if uri.startswith("file://"): uri = uri.split("file://")[-1] return open(uri, "rb") return urllib.request.urlopen(uri) def execute(self, image_id, file_path): """ :param image_id: Id to use when storing extracted image to Glance image store. It is assumed that some other task has already created a row in the store with this id. :param file_path: Path to the OVA package """ file_abs_path = file_path.split("file://")[-1] image = self.image_repo.get(image_id) # Expect 'ova' as image container format for OVF_Process task if image.container_format == 'ova': # FIXME(dramakri): This is an admin-only feature for security # reasons. Ideally this should be achieved by making the import # task API admin only. This is one of the items that the upcoming # import refactoring work plans to do. Until then, we will check # the context as a short-cut. if image.context and image.context.is_admin: extractor = OVAImageExtractor() data_iter = None try: data_iter = self._get_ova_iter_objects(file_path) disk, properties = extractor.extract(data_iter) image.extra_properties.update(properties) image.container_format = 'bare' self.image_repo.save(image) dest_path = self._get_extracted_file_path(image_id) with open(dest_path, 'wb') as f: shutil.copyfileobj(disk, f, 4096) finally: if data_iter: data_iter.close() # Overwrite the input ova file since it is no longer needed os.unlink(file_abs_path) os.rename(dest_path, file_abs_path) else: raise RuntimeError(_('OVA extract is limited to admin')) return file_path def revert(self, image_id, result, **kwargs): fs_path = self._get_extracted_file_path(image_id) if os.path.exists(fs_path): os.remove(fs_path) class OVAImageExtractor(object): """Extracts and parses the uploaded OVA package A class that extracts the disk image and OVF file from an OVA tar archive. Parses the OVF file for metadata of interest. """ def __init__(self): self.interested_properties = [] self._load_interested_properties() def extract(self, ova): """Extracts disk image and OVF file from OVA package Extracts a single disk image and OVF from OVA tar archive and calls OVF parser method. :param ova: a file object containing the OVA file :returns: a tuple of extracted disk file object and dictionary of properties parsed from the OVF file :raises RuntimeError: an error for malformed OVA and OVF files """ with tarfile.open(fileobj=ova) as tar_file: filenames = tar_file.getnames() ovf_filename = next((filename for filename in filenames if filename.endswith('.ovf')), None) if ovf_filename: ovf = tar_file.extractfile(ovf_filename) disk_name, properties = self._parse_OVF(ovf) ovf.close() else: raise RuntimeError(_('Could not find OVF file in OVA archive ' 'file.')) disk = tar_file.extractfile(disk_name) return (disk, properties) def _parse_OVF(self, ovf): """Parses the OVF file Parses the OVF file for specified metadata properties. Interested properties must be specified in ovf-metadata.json conf file. The OVF file's qualified namespaces are removed from the included properties.
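For illustration, an ovf-metadata.json restricting extraction to two CIM_ProcessorAllocationSettingData properties (the property names here are examples, not a recommendation) would contain: {"cim_pasd": ["InstructionSet", "ProcessorArchitecture"]}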
:param ovf: a file object containing the OVF file :returns: a tuple of disk filename and a properties dictionary :raises RuntimeError: an error for malformed OVF file """ def _get_namespace_and_tag(tag): """Separate and return the namespace and tag elements. There is no native support for this operation in elementtree package. See http://bugs.python.org/issue18304 for details. """ m = re.match(r'\{(.+)\}(.+)', tag) if m: return m.group(1), m.group(2) else: return '', tag disk_filename, file_elements, file_ref = None, None, None properties = {} for event, elem in etree.iterparse(ovf): if event == 'end': ns, tag = _get_namespace_and_tag(elem.tag) if ns in CIM_NS and tag in self.interested_properties: properties[CIM_NS[ns] + '_' + tag] = (elem.text.strip() if elem.text else '') if tag == 'DiskSection': disks = [child for child in list(elem) if _get_namespace_and_tag(child.tag)[1] == 'Disk'] if len(disks) > 1: """ Currently only single disk image extraction is supported. FIXME(dramakri): Support multiple images in OVA package """ raise RuntimeError(_('Currently, OVA packages ' 'containing multiple disk are ' 'not supported.')) disk = next(iter(disks)) file_ref = next(value for key, value in disk.items() if _get_namespace_and_tag(key)[1] == 'fileRef') if tag == 'References': file_elements = list(elem) # Clears elements to save memory except for 'File' and 'Disk' # references, which we will need to later access if tag != 'File' and tag != 'Disk': elem.clear() for file_element in file_elements: file_id = next(value for key, value in file_element.items() if _get_namespace_and_tag(key)[1] == 'id') if file_id != file_ref: continue disk_filename = next(value for key, value in file_element.items() if _get_namespace_and_tag(key)[1] == 'href') return (disk_filename, properties) def _load_interested_properties(self): """Find the OVF properties config file and load it. OVF properties config file specifies which metadata of interest to extract. Reads in a JSON file named 'ovf-metadata.json' if available. See example file at etc/ovf-metadata.json.sample. """ filename = 'ovf-metadata.json' match = CONF.find_file(filename) if match: with open(match, 'r') as properties_file: properties = json.loads(properties_file.read()) self.interested_properties = properties.get( 'cim_pasd', []) if not self.interested_properties: msg = _LW('OVF metadata of interest was not specified ' 'in ovf-metadata.json config file. Please ' 'set "cim_pasd" to a list of interested ' 'CIM_ProcessorAllocationSettingData ' 'properties.') LOG.warning(msg) else: LOG.warning(_LW('OVF properties config file "ovf-metadata.json" ' 'was not found.')) def get_flow(**kwargs): """Returns task flow for OVF Process. :param task_id: Task ID :param task_type: Type of the task. :param image_repo: Image repository used. 
""" task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s", {'task_type': task_type, 'id': task_id, 'repo': image_repo}) return lf.Flow(task_type).add( _OVF_Process(task_id, task_type, image_repo), ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8543026 glance-29.0.0/glance/async_/flows/plugins/0000775000175000017500000000000000000000000020376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/plugins/__init__.py0000664000175000017500000000226200000000000022511 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from stevedore import named CONF = cfg.CONF def get_import_plugins(**kwargs): task_list = CONF.image_import_opts.image_import_plugins extensions = named.NamedExtensionManager('glance.image_import.plugins', names=task_list, name_order=True, invoke_on_load=True, invoke_kwds=kwargs) for extension in extensions.extensions: yield extension.obj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/plugins/image_conversion.py0000664000175000017500000002516500000000000024310 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from taskflow.patterns import linear_flow as lf from taskflow import task from glance.async_ import utils from glance.common import format_inspector from glance.i18n import _, _LI LOG = logging.getLogger(__name__) conversion_plugin_opts = [ cfg.StrOpt('output_format', default='raw', choices=('qcow2', 'raw', 'vmdk'), help=_(""" Desired output format for image conversion plugin. Provide a valid image format to which the conversion plugin will convert the image before storing it to the back-end. Note, if the Image Conversion plugin for image import is defined, users should only upload disk formats that are supported by `quemu-img` otherwise the conversion and import will fail. 
Possible values: * qcow2 * raw * vmdk Related Options: * disk_formats """)), ] CONF = cfg.CONF CONF.register_opts(conversion_plugin_opts, group='image_conversion') class _ConvertImage(task.Task): default_provides = 'file_path' def __init__(self, context, task_id, task_type, action_wrapper, stores): self.context = context self.task_id = task_id self.task_type = task_type self.action_wrapper = action_wrapper self.stores = stores self.image_id = action_wrapper.image_id self.dest_path = "" self.src_path = "" self.python = CONF.wsgi.python_interpreter super(_ConvertImage, self).__init__( name='%s-Convert_Image-%s' % (task_type, task_id)) def execute(self, file_path, **kwargs): with self.action_wrapper as action: return self._execute(action, file_path, **kwargs) def _execute(self, action, file_path, **kwargs): target_format = CONF.image_conversion.output_format # TODO(jokke): Once we support other schemas we need to take them into # account and handle the paths here. self.src_path = file_path.split('file://')[-1] dest_path = "%(path)s.%(target)s" % {'path': self.src_path, 'target': target_format} self.dest_path = dest_path source_format = action.image_disk_format # Use our own cautious inspector module (if we have one for this # format) to make sure a file is the format the submitter claimed # it is and that it passes some basic safety checks _before_ we run # qemu-img on it. # See https://bugs.launchpad.net/nova/+bug/2059809 for details. try: inspector = format_inspector.detect_file_format(self.src_path) if not inspector.safety_check(): LOG.error('Image failed %s safety check; aborting conversion', source_format) raise RuntimeError('Image has disallowed configuration') except RuntimeError: raise except format_inspector.ImageFormatError as e: LOG.error('Image claimed to be %s format failed format ' 'inspection: %s', source_format, e) raise RuntimeError('Image format detection failed') except Exception as e: LOG.exception('Unknown error inspecting image format: %s', e) raise RuntimeError('Unable to inspect image') if str(inspector) == 'iso': if source_format == 'iso': # NOTE(abhishekk): Excluding conversion and preserving image # disk_format as `iso` only LOG.debug("Avoiding conversion of an image %s having" " `iso` disk format.", self.image_id) return file_path # NOTE(abhishekk): Raising error as image detected as ISO but # claimed as different format LOG.error('Image claimed to be %s format but format ' 'inspection found: ISO', source_format) raise RuntimeError("Image has disallowed configuration") elif str(inspector) != source_format: LOG.error('Image claimed to be %s format failed format ' 'inspection', source_format) raise RuntimeError('Image format mismatch') try: stdout, stderr = putils.trycmd("qemu-img", "info", "-f", source_format, "--output=json", self.src_path, prlimit=utils.QEMU_IMG_PROC_LIMITS, python_exec=self.python, log_errors=putils.LOG_ALL_ERRORS,) except OSError as exc: with excutils.save_and_reraise_exception(): exc_message = encodeutils.exception_to_unicode(exc) msg = ("Failed to do introspection as part of image " "conversion for %(iid)s: %(err)s") LOG.error(msg, {'iid': self.image_id, 'err': exc_message}) if stderr: raise RuntimeError(stderr) metadata = json.loads(stdout) if metadata.get('format') != source_format: LOG.error('Image claiming to be %s reported as %s by qemu-img', source_format, metadata.get('format', 'unknown')) raise RuntimeError('Image metadata disagrees about format') virtual_size = metadata.get('virtual-size', 0) 
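# Abridged, illustrative shape of the qemu-img info JSON consumed here:
#
#     {"format": "qcow2", "virtual-size": 10737418240, ...}
#
# plus the optional keys that trigger rejection: 'backing-filename',
# and format-specific -> data -> data-file (only present when set).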
action.set_image_attribute(virtual_size=virtual_size) if 'backing-filename' in metadata: LOG.warning('Refusing to process QCOW image with a backing file') raise RuntimeError( 'QCOW images with backing files are not allowed') try: data_file = metadata['format-specific']['data']['data-file'] except KeyError: data_file = None if data_file is not None: raise RuntimeError( 'QCOW images with data-file set are not allowed') if metadata.get('format') == 'vmdk': create_type = metadata.get( 'format-specific', {}).get( 'data', {}).get('create-type') allowed = CONF.image_format.vmdk_allowed_types if not create_type: raise RuntimeError(_('Unable to determine VMDK create-type')) if not len(allowed): LOG.warning(_('Refusing to process VMDK file as ' 'vmdk_allowed_types is empty')) raise RuntimeError(_('Image is a VMDK, but no VMDK createType ' 'is specified')) if create_type not in allowed: LOG.warning(_('Refusing to process VMDK file with create-type ' 'of %r which is not in allowed set of: %s'), create_type, ','.join(allowed)) raise RuntimeError(_('Invalid VMDK create-type specified')) if source_format == target_format: LOG.debug("Source is already in target format, " "not doing conversion for %s", self.image_id) return file_path try: stdout, stderr = putils.trycmd('qemu-img', 'convert', '-f', source_format, '-O', target_format, self.src_path, dest_path, log_errors=putils.LOG_ALL_ERRORS) except OSError as exc: with excutils.save_and_reraise_exception(): exc_message = encodeutils.exception_to_unicode(exc) msg = "Failed to do image conversion for %(iid)s: %(err)s" LOG.error(msg, {'iid': self.image_id, 'err': exc_message}) if stderr: raise RuntimeError(stderr) action.set_image_attribute(disk_format=target_format, container_format='bare') new_size = os.stat(dest_path).st_size action.set_image_attribute(size=new_size) LOG.info(_LI('Updated image %s size=%i disk_format=%s'), self.image_id, new_size, target_format) os.remove(self.src_path) return "file://%s" % dest_path def revert(self, result=None, **kwargs): # NOTE(flaper87): If result is None, it probably # means this task failed. Otherwise, we would have # a result from its execution. if result is not None: LOG.debug("Image conversion failed.") if os.path.exists(self.dest_path): os.remove(self.dest_path) # NOTE(abhishekk): If we failed to convert the image, then none # of the _ImportToStore() tasks could have run, so we need # to move all stores out of "importing" to "failed". with self.action_wrapper as action: action.set_image_attribute(status='queued') if self.stores: action.remove_importing_stores(self.stores) action.add_failed_stores(self.stores) if self.src_path: try: os.remove(self.src_path) except FileNotFoundError: # NOTE(abhishekk): We must have raced with something # else, so this is not a problem pass def get_flow(**kwargs): """Return task flow for no-op. :param context: request context :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. :param image_id: Image ID :param action_wrapper: An api_image_import.ActionWrapper. 
""" context = kwargs.get('context') task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') action_wrapper = kwargs.get('action_wrapper') stores = kwargs.get('backend', []) return lf.Flow(task_type).add( _ConvertImage(context, task_id, task_type, action_wrapper, stores) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/plugins/image_decompression.py0000664000175000017500000001316300000000000024770 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gzip import os import shutil import zipfile from oslo_log import log as logging from oslo_utils import encodeutils from taskflow.patterns import linear_flow as lf from taskflow import task LOG = logging.getLogger(__name__) # Note(jokke): The number before '_' is offset for the magic number in header MAGIC_NUMBERS = { '0_zipfile': bytes([0x50, 0x4B, 0x03, 0x04]), '2_lhafile': bytes([0x2D, 0x6C, 0x68]), '0_gzipfile': bytes([0x1F, 0x8B, 0x08])} NO_LHA = False try: import lhafile except ImportError: LOG.debug("No lhafile available.") NO_LHA = True def header_lengths(): headers = [] for key, val in MAGIC_NUMBERS.items(): offset, key = key.split("_") headers.append(int(offset) + len(val)) return headers MAX_HEADER = max(header_lengths()) def _zipfile(src_path, dest_path, image_id): try: with zipfile.ZipFile(src_path, 'r') as zfd: content = zfd.namelist() if len(content) != 1: raise Exception("Archive contains more than one file.") else: zfd.extract(content[0], dest_path) except Exception as e: LOG.debug("ZIP: Error decompressing image %(iid)s: %(msg)s", { "iid": image_id, "msg": encodeutils.exception_to_unicode(e)}) raise def _lhafile(src_path, dest_path, image_id): if NO_LHA: raise Exception("No lhafile available.") try: with lhafile.LhaFile(src_path, 'r') as lfd: content = lfd.namelist() if len(content) != 1: raise Exception("Archive contains more than one file.") else: lfd.extract(content[0], dest_path) except Exception as e: LOG.debug("LHA: Error decompressing image %(iid)s: %(msg)s", { "iid": image_id, "msg": encodeutils.exception_to_unicode(e)}) raise def _gzipfile(src_path, dest_path, image_id): try: with gzip.open(src_path, 'r') as gzfd: with open(dest_path, 'wb') as fd: shutil.copyfileobj(gzfd, fd) except gzip.BadGzipFile as e: LOG.debug("ZIP: Error decompressing image %(iid)s: Bad GZip file: " "%(msg)s", {"iid": image_id, "msg": encodeutils.exception_to_unicode(e)}) raise except Exception as e: LOG.debug("GZIP: Error decompressing image %(iid)s: %(msg)s", { "iid": image_id, "msg": encodeutils.exception_to_unicode(e)}) raise class _DecompressImage(task.Task): default_provides = 'file_path' def __init__(self, context, task_id, task_type, image_repo, image_id): self.context = context self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.image_id = image_id self.dest_path = "" super(_DecompressImage, self).__init__( 
name='%s-Decompress_Image-%s' % (task_type, task_id)) def execute(self, file_path, **kwargs): # TODO(jokke): Once we support other schemas we need to take them into # account and handle the paths here. src_path = file_path.split('file://')[-1] self.dest_path = "%(path)s.uc" % {'path': src_path} image = self.image_repo.get(self.image_id) # NOTE(jokke): If the container format is 'compressed' the image is # expected to be compressed so lets not decompress it. if image.container_format == 'compressed': return "file://%s" % src_path head = None with open(src_path, 'rb') as fd: head = fd.read(MAX_HEADER) for key, val in MAGIC_NUMBERS.items(): offset, key = key.split("_") offset = int(offset) key = "_" + key if head.startswith(val, offset): globals()[key](src_path, self.dest_path, self.image_id) os.replace(self.dest_path, src_path) return "file://%s" % src_path def revert(self, result=None, **kwargs): # NOTE(flaper87, jokke): If result is None, it probably # means this task failed. Otherwise, we would have # a result from its execution. This includes the case # that nothing was to be compressed. if result is not None: LOG.debug("Image decompression failed.") if os.path.exists(self.dest_path): os.remove(self.dest_path) def get_flow(**kwargs): """Return task flow for no-op. :param context: request context :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. :param image_id: Image ID """ context = kwargs.get('context') task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') image_id = kwargs.get('image_id') return lf.Flow(task_type).add( _DecompressImage(context, task_id, task_type, image_repo, image_id), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/plugins/inject_image_metadata.py0000664000175000017500000000556600000000000025242 0ustar00zuulzuul00000000000000# Copyright 2018 NTT DATA, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from taskflow.patterns import linear_flow as lf from taskflow import task from glance.i18n import _ CONF = cfg.CONF inject_metadata_opts = [ cfg.ListOpt('ignore_user_roles', default='admin', help=_(""" Specify name of user roles to be ignored for injecting metadata properties in the image. Possible values: * List containing user roles. For example: [admin,member] """)), cfg.DictOpt('inject', default={}, help=_(""" Dictionary contains metadata properties to be injected in image. Possible values: * Dictionary containing key/value pairs. Key characters length should be <= 255. 
For example: k1:v1,k2:v2 """)), ] CONF.register_opts(inject_metadata_opts, group='inject_metadata_properties') class _InjectMetadataProperties(task.Task): def __init__(self, context, task_id, task_type, action_wrapper): self.context = context self.task_id = task_id self.task_type = task_type self.action_wrapper = action_wrapper self.image_id = action_wrapper.image_id super(_InjectMetadataProperties, self).__init__( name='%s-InjectMetadataProperties-%s' % (task_type, task_id)) def execute(self): """Inject custom metadata properties to image """ user_roles = self.context.roles ignore_user_roles = CONF.inject_metadata_properties.ignore_user_roles if not [role for role in user_roles if role in ignore_user_roles]: properties = CONF.inject_metadata_properties.inject if properties: with self.action_wrapper as action: action.set_image_extra_properties(properties) def get_flow(**kwargs): """Return task flow for inject_image_metadata. :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. :param image_id: Image_ID used. :param context: Context used. """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') context = kwargs.get('context') action_wrapper = kwargs.get('action_wrapper') return lf.Flow(task_type).add( _InjectMetadataProperties(context, task_id, task_type, action_wrapper), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/plugins/no_op.py0000664000175000017500000000353200000000000022065 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from taskflow.patterns import linear_flow as lf from taskflow import task LOG = logging.getLogger(__name__) CONF = cfg.CONF class _Noop(task.Task): def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_Noop, self).__init__( name='%s-Noop-%s' % (task_type, task_id)) def execute(self, **kwargs): LOG.debug("No_op import plugin") return def revert(self, result=None, **kwargs): # NOTE(flaper87): If result is None, it probably # means this task failed. Otherwise, we would have # a result from its execution. if result is not None: LOG.debug("No_op import plugin failed") return def get_flow(**kwargs): """Return task flow for no-op. :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') return lf.Flow(task_type).add( _Noop(task_id, task_type, image_repo), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/flows/plugins/plugin_opts.py0000664000175000017500000000272400000000000023320 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance.async_.flows.plugins.image_conversion import glance.async_.flows.plugins.inject_image_metadata # Note(jokke): This list contains tuples of config options for import plugins. # When new plugin is introduced its config options need to be added to this # list so that they can be processed, when config generator is used to generate # the glance-image-import.conf.sample it will also pick up the details. The # module needs to be imported as the Glance release packaged example(s) above # and the first part of the tuple refers to the group the options gets # registered under at the config file. PLUGIN_OPTS = [ ('inject_metadata_properties', glance.async_.flows.plugins.inject_image_metadata.inject_metadata_opts), ('image_conversion', glance.async_.flows.plugins.image_conversion.conversion_plugin_opts), ] def get_plugin_opts(): return PLUGIN_OPTS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/taskflow_executor.py0000664000175000017500000001660500000000000021715 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from stevedore import driver from taskflow import engines from taskflow.listeners import logging as llistener import glance.async_ from glance.common import exception from glance.common.scripts import utils as script_utils from glance.i18n import _, _LE LOG = logging.getLogger(__name__) _deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size', group='task') taskflow_executor_opts = [ cfg.StrOpt('engine_mode', default='parallel', choices=('serial', 'parallel'), help=_(""" Set the taskflow engine mode. Provide a string type value to set the mode in which the taskflow engine would schedule tasks to the workers on the hosts. Based on this mode, the engine executes tasks either in single or multiple threads. The possible values for this configuration option are: ``serial`` and ``parallel``. When set to ``serial``, the engine runs all the tasks in a single thread which results in serial execution of tasks. Setting this to ``parallel`` makes the engine run tasks in multiple threads. This results in parallel execution of tasks. 
Possible values: * serial * parallel Related options: * max_workers """)), cfg.IntOpt('max_workers', default=10, min=1, help=_(""" Set the number of engine executable tasks. Provide an integer value to limit the number of workers that can be instantiated on the hosts. In other words, this number defines the number of parallel tasks that can be executed at the same time by the taskflow engine. This value can be greater than one when the engine mode is set to parallel. Possible values: * Integer value greater than or equal to 1 Related options: * engine_mode """), deprecated_opts=[_deprecated_opt]) ] CONF = cfg.CONF CONF.register_opts(taskflow_executor_opts, group='taskflow_executor') class TaskExecutor(glance.async_.TaskExecutor): def __init__(self, context, task_repo, image_repo, image_factory, admin_repo=None): self.context = context self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory self.admin_repo = admin_repo super(TaskExecutor, self).__init__(context, task_repo, image_repo, image_factory, admin_repo=admin_repo) @staticmethod def _fetch_an_executor(): if CONF.taskflow_executor.engine_mode != 'parallel': return None else: max_workers = CONF.taskflow_executor.max_workers threadpool_cls = glance.async_.get_threadpool_model() return threadpool_cls(max_workers).pool def _get_flow(self, task): try: task_input = script_utils.unpack_task_input(task) kwds = { 'task_id': task.task_id, 'task_type': task.type, 'context': self.context, 'task_repo': self.task_repo, 'image_repo': self.image_repo, 'image_factory': self.image_factory, 'backend': task_input.get('backend') } if self.admin_repo: kwds['admin_repo'] = self.admin_repo if task.type == "import": uri = script_utils.validate_location_uri( task_input.get('import_from')) kwds['uri'] = uri if task.type == 'api_image_import': kwds['image_id'] = task_input['image_id'] kwds['import_req'] = task_input['import_req'] if task.type == 'location_import': kwds['image_id'] = task_input['image_id'] kwds['loc_url'] = task_input.get('loc_url') kwds['val_data'] = task_input.get('validation_data', {}) return driver.DriverManager('glance.flows', task.type, invoke_on_load=True, invoke_kwds=kwds).driver except urllib.error.URLError as exc: raise exception.ImportTaskError(message=exc.reason) except (exception.BadStoreUri, exception.Invalid) as exc: raise exception.ImportTaskError(message=exc.msg) except exception.LimitExceeded as exc: raise exception.ImportTaskError(message=exc.msg) except RuntimeError: raise NotImplementedError() except Exception as e: LOG.exception(_LE('Task initialization failed: %s'), str(e)) raise def begin_processing(self, task_id): try: super(TaskExecutor, self).begin_processing(task_id) except exception.ImportTaskError as exc: LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s'), {'task_id': task_id, 'exc': exc.msg}) task = self.task_repo.get(task_id) task.fail(exc.msg) self.task_repo.save(task) def _run(self, task_id, task_type): LOG.debug('Taskflow executor picked up the execution of task ID ' '%(task_id)s of task type ' '%(task_type)s', {'task_id': task_id, 'task_type': task_type}) task = script_utils.get_task(self.task_repo, task_id) if task is None: # NOTE: This happens if task is not found in the database. In # such cases, there is no way to update the task status so, # it's ignored here. 
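# Past this early return, the method follows the stock taskflow
# recipe: resolve the flow for this task type, load it into an engine
# (serial or parallel, per [taskflow_executor]/engine_mode), and run
# it under a DynamicLoggingListener so state transitions are logged.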
return flow = self._get_flow(task) executor = self._fetch_an_executor() try: engine = engines.load( flow, engine=CONF.taskflow_executor.engine_mode, executor=executor, max_workers=CONF.taskflow_executor.max_workers) with llistener.DynamicLoggingListener(engine, log=LOG): engine.run() except exception.UploadException as exc: task.fail(encodeutils.exception_to_unicode(exc)) self.task_repo.save(task) except Exception as exc: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s'), {'task_id': task_id, 'exc': encodeutils.exception_to_unicode(exc)}) # TODO(sabari): Check for specific exceptions and update the # task failure message. task.fail(_('Task failed due to Internal Error')) self.task_repo.save(task) finally: if executor is not None: executor.shutdown() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/async_/utils.py0000664000175000017500000001016600000000000017301 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import units from taskflow import task from glance.common import exception as glance_exception from glance.i18n import _LW LOG = logging.getLogger(__name__) # NOTE(hemanthm): As reported in the bug #1449062, "qemu-img info" calls can # be exploited to craft DoS attacks by providing malicious input. The process # limits defined here are protections against such attacks. This essentially # limits the CPU time and address space used by the process that executes # "qemu-img info" command to 2 seconds and 1 GB respectively. QEMU_IMG_PROC_LIMITS = putils.ProcessLimits(cpu_time=2, address_space=1 * units.Gi) class OptionalTask(task.Task): def __init__(self, *args, **kwargs): super(OptionalTask, self).__init__(*args, **kwargs) self.execute = self._catch_all(self.execute) def _catch_all(self, func): # NOTE(flaper87): Read this comment before calling the MI6 # Here's the thing, there's no nice way to define "optional" # tasks. That is, tasks whose failure shouldn't affect the execution # of the flow. The only current "sane" way to do this, is by catching # everything and logging. This seems harmless from a taskflow # perspective but it is not. There are some issues related to this # "workaround": # # - Task's states will shamelessly lie to us saying the task succeeded. # # - No revert procedure will be triggered, which means optional tasks, # for now, mustn't cause any side-effects because they won't be able to # clean them up. If these tasks depend on other task that do cause side # effects, a task that cleans those side effects most be registered as # well. For example, _ImportToFS, _MyDumbTask, _DeleteFromFS. # # - Ideally, optional tasks shouldn't `provide` new values unless they # are part of an optional flow. 
Due to the decoration of the execute # method, these tasks will need to define the provided methods at # class level using `default_provides`. # # # The taskflow team is working on improving this and on something that # will provide the ability of defining optional tasks. For now, to lie # ourselves we must. # # NOTE(harlowja): The upstream change that is hopefully going to make # this easier/built-in is at: https://review.opendev.org/#/c/271116/ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as exc: msg = (_LW("An optional task has failed, " "the failure was: %s") % encodeutils.exception_to_unicode(exc)) LOG.warning(msg) return wrapper def get_glance_endpoint(context, region, interface): """Return glance endpoint depending the input tasks """ # We use the current context to retrieve the image catalog = context.service_catalog for service in catalog: if service['type'] == 'image': for endpoint in service['endpoints']: if endpoint['region'].lower() == region.lower(): return endpoint.get('%sURL' % interface) raise glance_exception.GlanceEndpointNotFound(region=region, interface=interface) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8543026 glance-29.0.0/glance/cmd/0000775000175000017500000000000000000000000015052 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/__init__.py0000664000175000017500000000000000000000000017151 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/api.py0000664000175000017500000001014700000000000016200 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance API Server """ import os import sys import eventlet # NOTE(jokke): As per the eventlet commit # b756447bab51046dfc6f1e0e299cc997ab343701 there's circular import happening # which can be solved making sure the hubs are properly and fully imported # before calling monkey_patch(). This is solved in eventlet 0.22.0 but we # need to address it before that is widely used around. eventlet.hubs.get_hub() if os.name == 'nt': # eventlet monkey patching the os module causes subprocess.Popen to fail # on Windows when using pipes due to missing non-blocking IO support. eventlet.patcher.monkey_patch(os=False) else: eventlet.patcher.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. 
See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading import threading orig_threading.current_thread.__globals__['_active'] = threading._active from oslo_reports import guru_meditation_report as gmr from oslo_utils import encodeutils # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) import glance_store from oslo_config import cfg from oslo_log import log as logging import osprofiler.initializer import glance.async_ from glance.common import config from glance.common import exception from glance.common import wsgi from glance import notifier from glance import version CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") logging.register_options(CONF) wsgi.register_cli_opts() # NOTE(rosmaita): Any new exceptions added should preserve the current # error codes for backward compatibility. The value 99 is returned # for errors not listed in this map. ERROR_CODE_MAP = {RuntimeError: 1, exception.WorkerCreationFailure: 2, glance_store.exceptions.BadStoreConfiguration: 3, ValueError: 4, cfg.ConfigFileValueError: 5} def fail(e): sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e)) return_code = ERROR_CODE_MAP.get(type(e), 99) sys.exit(return_code) def main(): try: config.parse_args() config.set_config_defaults() wsgi.set_eventlet_hub() logging.setup(CONF, 'glance') gmr.TextGuruMeditation.setup_autorun(version) notifier.set_defaults() if CONF.profiler.enabled: osprofiler.initializer.init_from_conf( conf=CONF, context={}, project="glance", service="api", host=CONF.bind_host ) # NOTE(danms): Configure system-wide threading model to use eventlet glance.async_.set_threadpool_model('eventlet') server = wsgi.Server(initialize_glance_store=True) server.start(config.load_paste_app('glance-api'), default_port=9292) server.wait() except Exception as e: fail(e) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/cache_cleaner.py0000664000175000017500000000415400000000000020164 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Image Cache Invalid Cache Entry and Stalled Image cleaner This is meant to be run as a periodic task from cron. If something goes wrong while we're caching an image (for example the fetch times out, or an exception is raised), we create an 'invalid' entry. These entries are left around for debugging purposes. 
However, after some period of time, we want to clean these up. Also, if an incomplete image hangs around past the image_cache_stall_time period, we automatically sweep it up. """ import os import sys from oslo_log import log as logging # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from glance.common import config from glance.image_cache import cleaner CONF = config.CONF logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) def main(): try: config.parse_cache_args() logging.setup(CONF, 'glance') app = cleaner.Cleaner() app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/cache_manage.py0000664000175000017500000004256000000000000020006 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2018 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A simple cache management utility for Glance. """ import argparse import collections import datetime import functools import os import sys import time import uuid from oslo_utils import encodeutils import prettytable # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from glance.common import exception import glance.image_cache.client from glance.version import version_info as version SUCCESS = 0 FAILURE = 1 def validate_input(func): """Decorator to enforce validation on input""" @functools.wraps(func) def wrapped(*args, **kwargs): if len(args[0].command) > 2: print("Please specify the ID of the image you wish for command " "'%s' from the cache as the first and only " "argument." % args[0].command[0]) return FAILURE if len(args[0].command) == 2: image_id = args[0].command[1] try: image_id = uuid.UUID(image_id) except ValueError: print("Image ID '%s' is not a valid UUID." 
% image_id) return FAILURE return func(args[0], **kwargs) return wrapped def catch_error(action): """Decorator to provide sensible default error handling for actions.""" def wrap(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: ret = func(*args, **kwargs) return SUCCESS if ret is None else ret except exception.NotFound: options = args[0] print("Cache management middleware not enabled on host %s" % options.host) return FAILURE except exception.Forbidden: print("Not authorized to make this request.") return FAILURE except Exception as e: options = args[0] if options.debug: raise print("Failed to %s. Got error:" % action) pieces = encodeutils.exception_to_unicode(e).split('\n') for piece in pieces: print(piece) return FAILURE return wrapper return wrap @catch_error('show cached images') def list_cached(args): """%(prog)s list-cached [options] List all images currently cached. """ client = get_client(args) images = client.get_cached_images() if not images: print("No cached images.") return SUCCESS print("Found %d cached images..." % len(images)) pretty_table = prettytable.PrettyTable(("ID", "Last Accessed (UTC)", "Last Modified (UTC)", "Size", "Hits")) pretty_table.align['Size'] = "r" pretty_table.align['Hits'] = "r" for image in images: last_accessed = image['last_accessed'] if last_accessed == 0: last_accessed = "N/A" else: last_accessed = datetime.datetime.utcfromtimestamp( last_accessed).isoformat() pretty_table.add_row(( image['image_id'], last_accessed, datetime.datetime.utcfromtimestamp( image['last_modified']).isoformat(), image['size'], image['hits'])) print(pretty_table.get_string()) return SUCCESS @catch_error('show queued images') def list_queued(args): """%(prog)s list-queued [options] List all images currently queued for caching. """ client = get_client(args) images = client.get_queued_images() if not images: print("No queued images.") return SUCCESS print("Found %d queued images..." % len(images)) pretty_table = prettytable.PrettyTable(("ID",)) for image in images: pretty_table.add_row((image,)) print(pretty_table.get_string()) @catch_error('queue the specified image for caching') @validate_input def queue_image(args): """%(prog)s queue-image [options] Queues an image for caching. """ image_id = args.command[1] if (not args.force and not user_confirm("Queue image %(image_id)s for caching?" % {'image_id': image_id}, default=False)): return SUCCESS client = get_client(args) client.queue_image_for_caching(image_id) if args.verbose: print("Queued image %(image_id)s for caching" % {'image_id': image_id}) return SUCCESS @catch_error('delete the specified cached image') @validate_input def delete_cached_image(args): """%(prog)s delete-cached-image [options] Deletes an image from the cache. """ image_id = args.command[1] if (not args.force and not user_confirm("Delete cached image %(image_id)s?" % {'image_id': image_id}, default=False)): return SUCCESS client = get_client(args) client.delete_cached_image(image_id) if args.verbose: print("Deleted cached image %(image_id)s" % {'image_id': image_id}) return SUCCESS @catch_error('Delete all cached images') def delete_all_cached_images(args): """%(prog)s delete-all-cached-images [options] Remove all images from the cache. 
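# validate_input() above accepts an image id only when uuid.UUID() can parse
# it. The same check in isolation, with example inputs:
import uuid

def _is_valid_image_id(value):
    try:
        uuid.UUID(value)
        return True
    except ValueError:
        return False

assert _is_valid_image_id('6fab10e2-0ce9-4855-9e04-43cdb3b57b30')
assert not _is_valid_image_id('not-a-uuid')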
""" if (not args.force and not user_confirm("Delete all cached images?", default=False)): return SUCCESS client = get_client(args) num_deleted = client.delete_all_cached_images() if args.verbose: print("Deleted %(num_deleted)s cached images" % {'num_deleted': num_deleted}) return SUCCESS @catch_error('delete the specified queued image') @validate_input def delete_queued_image(args): """%(prog)s delete-queued-image [options] Deletes an image from the cache. """ image_id = args.command[1] if (not args.force and not user_confirm("Delete queued image %(image_id)s?" % {'image_id': image_id}, default=False)): return SUCCESS client = get_client(args) client.delete_queued_image(image_id) if args.verbose: print("Deleted queued image %(image_id)s" % {'image_id': image_id}) return SUCCESS @catch_error('Delete all queued images') def delete_all_queued_images(args): """%(prog)s delete-all-queued-images [options] Remove all images from the cache queue. """ if (not args.force and not user_confirm("Delete all queued images?", default=False)): return SUCCESS client = get_client(args) num_deleted = client.delete_all_queued_images() if args.verbose: print("Deleted %(num_deleted)s queued images" % {'num_deleted': num_deleted}) return SUCCESS def get_client(options): """Return a new client object to a Glance server. specified by the --host and --port options supplied to the CLI """ # Generate auth_url based on identity_api_version identity_version = env('OS_IDENTITY_API_VERSION', default='3') auth_url = options.os_auth_url if identity_version == '3' and "/v3" not in auth_url: auth_url = auth_url + "/v3" elif identity_version == '2' and "/v2" not in auth_url: auth_url = auth_url + "/v2.0" user_domain_id = options.os_user_domain_id if not user_domain_id: user_domain_id = options.os_domain_id project_domain_id = options.os_project_domain_id if not user_domain_id: project_domain_id = options.os_domain_id return glance.image_cache.client.get_client( host=options.host, port=options.port, username=options.os_username, password=options.os_password, project=options.os_project_name, user_domain_id=user_domain_id, project_domain_id=project_domain_id, auth_url=auth_url, auth_strategy=options.os_auth_strategy, auth_token=options.os_auth_token, region=options.os_region_name, insecure=options.insecure) def env(*vars, **kwargs): """Search for the first defined of possibly many env vars. Returns the first environment variable defined in vars, or returns the default defined in kwargs. """ for v in vars: value = os.environ.get(v) if value: return value return kwargs.get('default', '') def print_help(args): """ Print help specific to a command """ command = lookup_command(args.command[1]) print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) def parse_args(parser): """Set up the CLI and config-file options that may be parsed and program commands. 
:param parser: The option parser """ parser.add_argument('command', default='help', nargs='+', help='The command to execute') parser.add_argument('-v', '--verbose', default=False, action="store_true", help="Print more verbose output.") parser.add_argument('-d', '--debug', default=False, action="store_true", help="Print debugging output.") parser.add_argument('-H', '--host', metavar="ADDRESS", default="0.0.0.0", help="Address of Glance API host.") parser.add_argument('-p', '--port', dest="port", metavar="PORT", type=int, default=9292, help="Port the Glance API host listens on.") parser.add_argument('-k', '--insecure', dest="insecure", default=False, action="store_true", help='Explicitly allow glance to perform "insecure" ' "SSL (https) requests. The server's certificate " "will not be verified against any certificate " "authorities. This option should be used with " "caution.") parser.add_argument('-f', '--force', dest="force", default=False, action="store_true", help="Prevent select actions from requesting " "user confirmation.") parser.add_argument('--os-auth-token', dest='os_auth_token', default=env('OS_AUTH_TOKEN'), help='Defaults to env[OS_AUTH_TOKEN].') parser.add_argument('-A', '--os_auth_token', '--auth_token', dest='os_auth_token', help=argparse.SUPPRESS) parser.add_argument('--os-username', dest='os_username', default=env('OS_USERNAME'), help='Defaults to env[OS_USERNAME].') parser.add_argument('-I', '--os_username', dest='os_username', help=argparse.SUPPRESS) parser.add_argument('--os-password', dest='os_password', default=env('OS_PASSWORD'), help='Defaults to env[OS_PASSWORD].') parser.add_argument('-K', '--os_password', dest='os_password', help=argparse.SUPPRESS) parser.add_argument('--os-region-name', dest='os_region_name', default=env('OS_REGION_NAME'), help='Defaults to env[OS_REGION_NAME].') parser.add_argument('-R', '--os_region_name', dest='os_region_name', help=argparse.SUPPRESS) parser.add_argument('--os-project-id', dest='os_project_id', default=env('OS_PROJECT_ID'), help='Defaults to env[OS_PROJECT_ID].') parser.add_argument('--os_project_id', dest='os_project_id', help=argparse.SUPPRESS) parser.add_argument('--os-project-name', dest='os_project_name', default=env('OS_PROJECT_NAME'), help='Defaults to env[OS_PROJECT_NAME].') parser.add_argument('-T', '--os_project_name', dest='os_project_name', help=argparse.SUPPRESS) # arguments related user, project domain parser.add_argument('--os-user-domain-id', dest='os_user_domain_id', default=env('OS_USER_DOMAIN_ID'), help='Defaults to env[OS_USER_DOMAIN_ID].') parser.add_argument('--os-project-domain-id', dest='os_project_domain_id', default=env('OS_PROJECT_DOMAIN_ID'), help='Defaults to env[OS_PROJECT_DOMAIN_ID].') parser.add_argument('--os-domain-id', dest='os_domain_id', default=env('OS_DOMAIN_ID', default='default'), help='Defaults to env[OS_DOMAIN_ID].') parser.add_argument('--os-auth-url', default=env('OS_AUTH_URL'), help='Defaults to env[OS_AUTH_URL].') parser.add_argument('-N', '--os_auth_url', dest='os_auth_url', help=argparse.SUPPRESS) parser.add_argument('-S', '--os_auth_strategy', dest="os_auth_strategy", metavar="STRATEGY", help="Authentication strategy (keystone or noauth).") version_string = version.cached_version_string() parser.add_argument('--version', action='version', version=version_string) return parser.parse_args() CACHE_COMMANDS = collections.OrderedDict() CACHE_COMMANDS['help'] = ( print_help, 'Output help for one of the commands below') CACHE_COMMANDS['list-cached'] = ( list_cached, 'List all images 
currently cached') CACHE_COMMANDS['list-queued'] = ( list_queued, 'List all images currently queued for caching') CACHE_COMMANDS['queue-image'] = ( queue_image, 'Queue an image for caching') CACHE_COMMANDS['delete-cached-image'] = ( delete_cached_image, 'Purges an image from the cache') CACHE_COMMANDS['delete-all-cached-images'] = ( delete_all_cached_images, 'Removes all images from the cache') CACHE_COMMANDS['delete-queued-image'] = ( delete_queued_image, 'Deletes an image from the cache queue') CACHE_COMMANDS['delete-all-queued-images'] = ( delete_all_queued_images, 'Deletes all images from the cache queue') def _format_command_help(): """Formats the help string for subcommands.""" help_msg = "Commands:\n\n" for command, info in CACHE_COMMANDS.items(): if command == 'help': command = 'help <command>' help_msg += "    %-28s%s\n\n" % (command, info[1]) return help_msg def lookup_command(command_name): try: command = CACHE_COMMANDS[command_name] return command[0] except KeyError: print('\nError: "%s" is not a valid command.\n' % command_name) print(_format_command_help()) sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name}) def user_confirm(prompt, default=False): """Yes/No question dialog with user. :param prompt: question/statement to present to user (string) :param default: boolean value to return if empty string is received as response to prompt """ if default: prompt_default = "[Y/n]" else: prompt_default = "[y/N]" answer = input("%s %s " % (prompt, prompt_default)) if answer == "": return default else: return answer.lower() in ("yes", "y") def main(): print('In the Caracal development cycle, the glance-cache-manage command ' 'has been deprecated in favor of the new Cache API. It is scheduled ' 'to be removed in the Dalmatian development cycle.', file=sys.stderr) parser = argparse.ArgumentParser( description=_format_command_help(), formatter_class=argparse.RawDescriptionHelpFormatter) args = parse_args(parser) if args.command[0] == 'help' and len(args.command) == 1: parser.print_help() return # Look up the command to run command = lookup_command(args.command[0]) try: start_time = time.time() result = command(args) end_time = time.time() if args.verbose: print("Completed in %-0.4f sec." % (end_time - start_time)) sys.exit(result) except (RuntimeError, NotImplementedError) as e: sys.exit("ERROR: %s" % e) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/cache_prefetcher.py0000664000175000017500000000424200000000000020700 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Image Cache Pre-fetcher This is meant to be run from the command line after queueing images to be prefetched. """ import os import sys # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python...
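# glance-cache-manage dispatches on the CACHE_COMMANDS name -> (function,
# description) table above. A tiny standalone sketch of that table-driven
# dispatch pattern; the 'hello' command is a hypothetical stand-in:
import sys

def _cmd_hello(args):
    print("hello %s" % args)
    return 0

_COMMANDS = {'hello': (_cmd_hello, 'Say hello')}

def _dispatch(name, args):
    try:
        func = _COMMANDS[name][0]
    except KeyError:
        sys.exit("Unknown command: %s" % name)
    return func(args)

assert _dispatch('hello', 'world') == 0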
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) import glance_store from oslo_log import log as logging import glance.async_ from glance.common import config from glance.image_cache import prefetcher CONF = config.CONF logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) def main(): try: config.parse_cache_args() logging.setup(CONF, 'glance') CONF.import_opt('enabled_backends', 'glance.common.wsgi') glance.async_.set_threadpool_model('eventlet') if CONF.enabled_backends: glance_store.register_store_opts(CONF) glance_store.create_multi_stores(CONF) glance_store.verify_store() else: glance_store.register_opts(CONF) glance_store.create_stores(CONF) glance_store.verify_default_store() app = prefetcher.Prefetcher() app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/cache_pruner.py0000664000175000017500000000333100000000000020062 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Image Cache Pruner This is meant to be run as a periodic task, perhaps every half-hour. """ import os import sys from oslo_log import log as logging # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from glance.common import config from glance.image_cache import pruner CONF = config.CONF logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) def main(): try: config.parse_cache_args() logging.setup(CONF, 'glance') app = pruner.Pruner() app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/control.py0000664000175000017500000003244200000000000017111 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Helper script for starting/stopping/reloading Glance server programs. Thanks for some of the code, Swifties ;) """ import argparse import fcntl import os import resource import signal import subprocess import sys import tempfile import time # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from oslo_config import cfg from oslo_utils import units from glance.common import config from glance.i18n import _ CONF = cfg.CONF ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart', 'reload', 'force-reload'] ALL_SERVERS = ['api', 'scrubber'] RELOAD_SERVERS = ['glance-api'] GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-scrubber'] MAX_DESCRIPTORS = 32768 MAX_MEMORY = 2 * units.Gi # 2 GB USAGE = """%(prog)s [options] <SERVER> <COMMAND> [CONFPATH] Where <SERVER> is one of: all, {0} And <COMMAND> is one of: {1} And CONFPATH is the optional configuration file to use.""".format( ', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS)) exitcode = 0 def gated_by(predicate): def wrap(f): def wrapped_f(*args): if predicate: return f(*args) else: return None return wrapped_f return wrap def pid_files(server, pid_file): pid_files = [] if pid_file: if os.path.exists(os.path.abspath(pid_file)): pid_files = [os.path.abspath(pid_file)] else: if os.path.exists('/var/run/glance/%s.pid' % server): pid_files = ['/var/run/glance/%s.pid' % server] for pid_file in pid_files: pid = int(open(pid_file).read().strip()) yield pid_file, pid def do_start(verb, pid_file, server, args): if verb != 'Respawn' and pid_file == CONF.pid_file: for pid_file, pid in pid_files(server, pid_file): if os.path.exists('/proc/%s' % pid): print(_("%(serv)s appears to already be running: %(pid)s") % {'serv': server, 'pid': pid_file}) return else: print(_("Removing stale pid file %s") % pid_file) os.unlink(pid_file) try: resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) resource.setrlimit(resource.RLIMIT_DATA, (MAX_MEMORY, MAX_MEMORY)) except ValueError: print(_('Unable to increase file descriptor limit.
' 'Running as non-root?')) os.environ['PYTHON_EGG_CACHE'] = '/tmp' def write_pid_file(pid_file, pid): with open(pid_file, 'w') as fp: fp.write('%d\n' % pid) def redirect_to_null(fds): with open(os.devnull, 'r+b') as nullfile: for desc in fds: # close fds try: os.dup2(nullfile.fileno(), desc) except OSError: pass def redirect_to_syslog(fds, server): log_cmd = 'logger' log_cmd_params = '-t "%s[%d]"' % (server, os.getpid()) process = subprocess.Popen([log_cmd, log_cmd_params], stdin=subprocess.PIPE) for desc in fds: # pipe to logger command try: os.dup2(process.stdin.fileno(), desc) except OSError: pass def redirect_stdio(server, capture_output): input = [sys.stdin.fileno()] output = [sys.stdout.fileno(), sys.stderr.fileno()] redirect_to_null(input) if capture_output: redirect_to_syslog(output, server) else: redirect_to_null(output) @gated_by(CONF.capture_output) def close_stdio_on_exec(): fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()] for desc in fds: # set close on exec flag fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC) def launch(pid_file, conf_file=None, capture_output=False, await_time=0): args = [server] if conf_file: args += ['--config-file', conf_file] msg = (_('%(verb)sing %(serv)s with %(conf)s') % {'verb': verb, 'serv': server, 'conf': conf_file}) else: msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server}) print(msg) close_stdio_on_exec() pid = os.fork() if pid == 0: os.setsid() redirect_stdio(server, capture_output) try: os.execlp('%s' % server, *args) except OSError as e: msg = (_('unable to launch %(serv)s. Got error: %(e)s') % {'serv': server, 'e': e}) sys.exit(msg) else: write_pid_file(pid_file, pid) await_child(pid, await_time) return pid @gated_by(CONF.await_child) def await_child(pid, await_time): bail_time = time.time() + await_time while time.time() < bail_time: reported_pid, status = os.waitpid(pid, os.WNOHANG) if reported_pid == pid: global exitcode exitcode = os.WEXITSTATUS(status) break time.sleep(0.05) conf_file = None if args and os.path.exists(args[0]): conf_file = os.path.abspath(os.path.expanduser(args[0])) return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child) def do_check_status(pid_file, server): if os.path.exists(pid_file): with open(pid_file, 'r') as pidfile: pid = pidfile.read().strip() print(_("%(serv)s (pid %(pid)s) is running...") % {'serv': server, 'pid': pid}) else: print(_("%s is stopped") % server) def get_pid_file(server, pid_file): pid_file = (os.path.abspath(pid_file) if pid_file else '/var/run/glance/%s.pid' % server) dir, file = os.path.split(pid_file) if not os.path.exists(dir): try: os.makedirs(dir) except OSError: pass if not os.access(dir, os.W_OK): fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server) msg = (_('Unable to create pid file %(pid)s. 
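# launch() above daemonizes with the classic fork()/setsid()/exec sequence:
# the child detaches from the controlling terminal and replaces itself with
# the server binary, while the parent records the child's pid. A hedged,
# POSIX-only sketch of the same pattern (the sleep target and pid path are
# just illustrations):
import os

def _spawn_detached(argv, pid_path):
    pid = os.fork()
    if pid == 0:                   # child
        os.setsid()                # new session, no controlling tty
        os.execvp(argv[0], argv)   # never returns on success
    with open(pid_path, 'w') as fp:  # parent
        fp.write('%d\n' % pid)
    return pid

# Example: _spawn_detached(['sleep', '60'], '/tmp/demo.pid')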
Running as non-root?\n' 'Falling back to a temp file, you can stop %(service)s ' 'service using:\n' ' %(file)s %(server)s stop --pid-file %(fb)s') % {'pid': pid_file, 'service': server, 'file': __file__, 'server': server, 'fb': fallback}) print(msg) pid_file = fallback return pid_file def do_reload(pid_file, server): if server not in RELOAD_SERVERS: msg = (_('Reload of %(serv)s not supported') % {'serv': server}) sys.exit(msg) pid = None if os.path.exists(pid_file): with open(pid_file, 'r') as pidfile: pid = int(pidfile.read().strip()) else: msg = (_('Server %(serv)s is stopped') % {'serv': server}) sys.exit(msg) sig = signal.SIGHUP try: print(_('Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)') % {'serv': server, 'pid': pid, 'sig': sig}) os.kill(pid, sig) except OSError: print(_("Process %d not running") % pid) def do_stop(server, args, graceful=False): if graceful and server in GRACEFUL_SHUTDOWN_SERVERS: sig = signal.SIGHUP else: sig = signal.SIGTERM did_anything = False pfiles = pid_files(server, CONF.pid_file) for pid_file, pid in pfiles: did_anything = True try: os.unlink(pid_file) except OSError: pass try: print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)') % {'serv': server, 'pid': pid, 'sig': sig}) os.kill(pid, sig) except OSError: print(_("Process %d not running") % pid) for pid_file, pid in pfiles: for _junk in range(150): # 15 seconds if not os.path.exists('/proc/%s' % pid): break time.sleep(0.1) else: print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;' ' giving up') % {'pid': pid, 'file': pid_file}) if not did_anything: print(_('%s is already stopped') % server) def add_command_parsers(subparsers): cmd_parser = argparse.ArgumentParser(add_help=False) cmd_subparsers = cmd_parser.add_subparsers(dest='command') for cmd in ALL_COMMANDS: parser = cmd_subparsers.add_parser(cmd) parser.add_argument('args', nargs=argparse.REMAINDER) for server in ALL_SERVERS: full_name = 'glance-' + server parser = subparsers.add_parser(server, parents=[cmd_parser]) parser.set_defaults(servers=[full_name]) parser = subparsers.add_parser(full_name, parents=[cmd_parser]) parser.set_defaults(servers=[full_name]) parser = subparsers.add_parser('all', parents=[cmd_parser]) parser.set_defaults(servers=['glance-' + s for s in ALL_SERVERS]) def main(): global exitcode opts = [ cfg.SubCommandOpt('server', title='Server types', help='Available server types', handler=add_command_parsers), cfg.StrOpt('pid-file', metavar='PATH', help='File to use as pid file. 
Default: ' '/var/run/glance/$server.pid.'), cfg.IntOpt('await-child', metavar='DELAY', default=0, help='Period to wait for service death ' 'in order to report exit code ' '(default is to not wait at all).'), cfg.BoolOpt('capture-output', default=False, help='Capture stdout/err in syslog ' 'instead of discarding it.'), cfg.BoolOpt('respawn', default=False, help='Restart service on unexpected death.'), ] CONF.register_cli_opts(opts) config.parse_args(usage=USAGE) @gated_by(CONF.await_child) @gated_by(CONF.respawn) def mutually_exclusive(): sys.stderr.write('--await-child and --respawn are mutually exclusive') sys.exit(1) mutually_exclusive() @gated_by(CONF.respawn) def anticipate_respawn(children): while children: pid, status = os.wait() if pid in children: (pid_file, server, args) = children.pop(pid) running = os.path.exists(pid_file) one_second_ago = time.time() - 1 bouncing = (running and os.path.getmtime(pid_file) >= one_second_ago) if running and not bouncing: args = (pid_file, server, args) new_pid = do_start('Respawn', *args) children[new_pid] = args else: rsn = 'bouncing' if bouncing else 'deliberately stopped' print(_('Suppressed respawn as %(serv)s was %(rsn)s.') % {'serv': server, 'rsn': rsn}) if CONF.server.command == 'start': children = {} for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) args = (pid_file, server, CONF.server.args) pid = do_start('Start', *args) children[pid] = args anticipate_respawn(children) if CONF.server.command == 'status': for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) do_check_status(pid_file, server) if CONF.server.command == 'stop': for server in CONF.server.servers: do_stop(server, CONF.server.args) if CONF.server.command == 'shutdown': for server in CONF.server.servers: do_stop(server, CONF.server.args, graceful=True) if CONF.server.command == 'restart': for server in CONF.server.servers: do_stop(server, CONF.server.args) for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) do_start('Restart', pid_file, server, CONF.server.args) if CONF.server.command in ('reload', 'force-reload'): for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) do_reload(pid_file, server) sys.exit(exitcode) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/manage.py0000664000175000017500000005516000000000000016663 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Glance Management Utility """ # FIXME(sirp): When we have glance-admin we can consider merging this into it # Perhaps for consistency with Nova, we would then rename glance-admin -> # glance-manage (or the other way around) import os import sys import time # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from alembic import command as alembic_command from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import encodeutils from glance.common import config from glance.common import exception from glance import context from glance.db import migration as db_migration from glance.db.sqlalchemy import alembic_migrations from glance.db.sqlalchemy.alembic_migrations import data_migrations from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import metadata from glance.i18n import _ CONF = cfg.CONF USE_TRIGGERS = True # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator class DbCommands(object): """Class for managing the db""" def __init__(self): pass def version(self): """Print database's current migration level""" current_heads = alembic_migrations.get_current_alembic_heads() if current_heads: # Migrations are managed by alembic for head in current_heads: print(head) else: # Migrations are managed by legacy versioning scheme print(_('Database is either not under migration control or under ' 'legacy migration control, please run ' '"glance-manage db sync" to place the database under ' 'alembic migration control.')) def check(self): """Report any pending database upgrades. An exit code of 3 indicates db expand is needed, see stdout output. An exit code of 4 indicates db migrate is needed, see stdout output. An exit code of 5 indicates db contract is needed, see stdout output. """ engine = db_api.get_engine() self._validate_engine(engine) curr_heads = alembic_migrations.get_current_alembic_heads() expand_heads = alembic_migrations.get_alembic_branch_head( db_migration.EXPAND_BRANCH) contract_heads = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) if (contract_heads in curr_heads): print(_('Database is up to date. No upgrades needed.')) sys.exit() elif ((not expand_heads) or (expand_heads not in curr_heads)): print(_('Your database is not up to date. ' 'Your first step is to run `glance-manage db expand`.')) sys.exit(3) elif data_migrations.has_pending_migrations(db_api.get_engine()): print(_('Your database is not up to date. ' 'Your next step is to run `glance-manage db migrate`.')) sys.exit(4) elif ((not contract_heads) or (contract_heads not in curr_heads)): print(_('Your database is not up to date. 
' 'Your next step is to run `glance-manage db contract`.')) sys.exit(5) @args('--version', metavar='<version>', help='Database version') def upgrade(self, version='heads'): """Upgrade the database's migration level""" self._sync(version) @args('--version', metavar='<version>', help='Database version') def version_control(self, version=db_migration.ALEMBIC_INIT_VERSION): """Place a database under migration control""" if version is None: version = db_migration.ALEMBIC_INIT_VERSION a_config = alembic_migrations.get_alembic_config() alembic_command.stamp(a_config, version) print(_("Placed database under migration control at " "revision:"), version) @args('--version', metavar='<version>', help='Database version') def sync(self, version=None): """Perform a complete (offline) database migration""" global USE_TRIGGERS # This flag lets us bypass trigger setup & teardown for non-rolling # upgrades. We set this as a global variable immediately before handing # off to alembic, because we can't pass arguments directly to # migrations that depend on it. USE_TRIGGERS = False curr_heads = alembic_migrations.get_current_alembic_heads() contract = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) if contract in curr_heads: print(_('Database is up to date. No migrations needed.')) sys.exit() try: # NOTE(abhishekk): db_sync should not be used for online # migrations. self.expand(online_migration=False) self.migrate(online_migration=False) self.contract(online_migration=False) print(_('Database is synced successfully.')) except exception.GlanceException as e: sys.exit(_('Failed to sync database: ERROR: %s') % e) def _sync(self, version): """ Place an existing database under migration control and upgrade it. """ a_config = alembic_migrations.get_alembic_config() alembic_command.upgrade(a_config, version) heads = alembic_migrations.get_current_alembic_heads() if heads is None: raise exception.GlanceException("Database sync failed") revs = ", ".join(heads) if version == 'heads': print(_("Upgraded database, current revision(s):"), revs) else: print(_('Upgraded database to: %(v)s, current revision(s): %(r)s') % {'v': version, 'r': revs}) def _validate_engine(self, engine): """Check whether the database engine is supported. Only MySQL is supported for online upgrades; sqlite is also accepted so the existing functional test cases keep working. :param engine: database engine name """ if engine.engine.name not in ['mysql', 'sqlite']: sys.exit(_('Rolling upgrades are currently supported only for ' 'MySQL and Sqlite')) def expand(self, online_migration=True): """Run the expansion phase of a database migration.""" if online_migration: self._validate_engine(db_api.get_engine()) curr_heads = alembic_migrations.get_current_alembic_heads() expand_head = alembic_migrations.get_alembic_branch_head( db_migration.EXPAND_BRANCH) contract_head = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) if not expand_head: sys.exit(_('Database expansion failed. Couldn\'t find head ' 'revision of expand branch.')) elif (contract_head in curr_heads): print(_('Database is up to date. No migrations needed.')) sys.exit() if expand_head not in curr_heads: self._sync(version=expand_head) curr_heads = alembic_migrations.get_current_alembic_heads() if expand_head not in curr_heads: sys.exit(_('Database expansion failed. Database expansion ' 'should have brought the database version up to ' '"%(e_rev)s" revision.
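# _sync() above drives alembic programmatically via its command API. A
# minimal sketch of the same call pattern; the alembic.ini path here is
# hypothetical (glance builds its config with
# alembic_migrations.get_alembic_config() instead):
from alembic import command as alembic_command
from alembic.config import Config

def _upgrade_to(revision='heads', ini_path='alembic.ini'):
    a_config = Config(ini_path)
    alembic_command.upgrade(a_config, revision)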
But, current revisions are' ': %(curr_revs)s ') % {'e_rev': expand_head, 'curr_revs': curr_heads}) else: print(_('Database expansion is up to date. No expansion needed.')) def contract(self, online_migration=True): """Run the contraction phase of a database migration.""" if online_migration: self._validate_engine(db_api.get_engine()) curr_heads = alembic_migrations.get_current_alembic_heads() contract_head = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) if not contract_head: sys.exit(_('Database contraction failed. Couldn\'t find head ' 'revision of contract branch.')) elif (contract_head in curr_heads): print(_('Database is up to date. No migrations needed.')) sys.exit() expand_head = alembic_migrations.get_alembic_branch_head( db_migration.EXPAND_BRANCH) if expand_head not in curr_heads: sys.exit(_('Database contraction did not run. Database ' 'contraction cannot be run before database expansion. ' 'Run database expansion first using ' '"glance-manage db expand"')) if data_migrations.has_pending_migrations(db_api.get_engine()): sys.exit(_('Database contraction did not run. Database ' 'contraction cannot be run before data migration is ' 'complete. Run data migration using "glance-manage db ' 'migrate".')) self._sync(version=contract_head) curr_heads = alembic_migrations.get_current_alembic_heads() if contract_head not in curr_heads: sys.exit(_('Database contraction failed. Database contraction ' 'should have brought the database version up to ' '"%(e_rev)s" revision. But, current revisions are: ' '%(curr_revs)s ') % {'e_rev': contract_head, 'curr_revs': curr_heads}) def migrate(self, online_migration=True): """Run the data migration phase of a database migration.""" if online_migration: self._validate_engine(db_api.get_engine()) curr_heads = alembic_migrations.get_current_alembic_heads() contract_head = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) if (contract_head in curr_heads): print(_('Database is up to date. No migrations needed.')) sys.exit() expand_head = alembic_migrations.get_alembic_branch_head( db_migration.EXPAND_BRANCH) if expand_head not in curr_heads: sys.exit(_('Data migration did not run. Data migration cannot be ' 'run before database expansion. Run database ' 'expansion first using "glance-manage db expand"')) if data_migrations.has_pending_migrations(db_api.get_engine()): rows_migrated = data_migrations.migrate(db_api.get_engine()) print(_('Migrated %s rows') % rows_migrated) else: print(_('Database migration is up to date. No migration needed.')) @args('--path', metavar='<path>', help='Path to the directory or file ' 'where json metadata is stored') @args('--merge', action='store_true', help='Merge files with data that is in the database. By default it ' 'prefers existing data over new. This logic can be changed by ' 'combining the --merge option with one of these two options: ' '--prefer_new or --overwrite.') @args('--prefer_new', action='store_true', help='Prefer new metadata over existing. Existing metadata ' 'might be overwritten. Needs to be combined with --merge ' 'option.') @args('--overwrite', action='store_true', help='Drop and rewrite metadata.
Needs to be combined with --merge ' 'option') def load_metadefs(self, path=None, merge=False, prefer_new=False, overwrite=False): """Load metadefinition json files to database""" metadata.db_load_metadefs(db_api.get_engine(), path, merge, prefer_new, overwrite) def unload_metadefs(self): """Unload metadefinitions from database""" metadata.db_unload_metadefs(db_api.get_engine()) @args('--path', metavar='', help='Path to the directory where ' 'json metadata files should be ' 'saved.') def export_metadefs(self, path=None): """Export metadefinitions data from database to files""" metadata.db_export_metadefs(db_api.get_engine(), path) def _purge(self, age_in_days, max_rows, purge_images_only=False): try: age_in_days = int(age_in_days) except ValueError: sys.exit(_("Invalid int value for age_in_days: " "%(age_in_days)s") % {'age_in_days': age_in_days}) try: max_rows = int(max_rows) except ValueError: sys.exit(_("Invalid int value for max_rows: " "%(max_rows)s") % {'max_rows': max_rows}) if age_in_days < 0: sys.exit(_("Must supply a non-negative value for age.")) if age_in_days >= (int(time.time()) / 86400): sys.exit(_("Maximal age is count of days since epoch.")) if max_rows < -1: sys.exit(_("Minimal rows limit is -1.")) ctx = context.get_admin_context(show_deleted=True) try: if purge_images_only: db_api.purge_deleted_rows_from_images(ctx, age_in_days, max_rows) else: db_api.purge_deleted_rows(ctx, age_in_days, max_rows) except exception.Invalid as exc: sys.exit(exc.msg) except db_exc.DBReferenceError: sys.exit(_("Purge command failed, check glance-manage" " logs for more details.")) @args('--age_in_days', type=int, help='Purge deleted rows older than age in days') @args('--max_rows', type=int, help='Limit number of records to delete. All deleted rows will be ' 'purged if equals -1.') def purge(self, age_in_days=30, max_rows=100): """Purge deleted rows older than a given age from glance tables.""" self._purge(age_in_days, max_rows) @args('--age_in_days', type=int, help='Purge deleted rows older than age in days') @args('--max_rows', type=int, help='Limit number of records to delete. 
All deleted rows will be ' 'purged if equals -1.') def purge_images_table(self, age_in_days=180, max_rows=100): """Purge deleted rows older than a given age from images table.""" self._purge(age_in_days, max_rows, purge_images_only=True) class DbLegacyCommands(object): """Class for managing the db using legacy commands""" def __init__(self, command_object): self.command_object = command_object def version(self): self.command_object.version() def upgrade(self, version='heads'): self.command_object.upgrade(CONF.command.version) def version_control(self, version=db_migration.ALEMBIC_INIT_VERSION): self.command_object.version_control(CONF.command.version) def sync(self, version=None): self.command_object.sync(CONF.command.version) def expand(self): self.command_object.expand() def contract(self): self.command_object.contract() def migrate(self): self.command_object.migrate() def check(self): self.command_object.check() def load_metadefs(self, path=None, merge=False, prefer_new=False, overwrite=False): self.command_object.load_metadefs(CONF.command.path, CONF.command.merge, CONF.command.prefer_new, CONF.command.overwrite) def unload_metadefs(self): self.command_object.unload_metadefs() def export_metadefs(self, path=None): self.command_object.export_metadefs(CONF.command.path) def add_legacy_command_parsers(command_object, subparsers): legacy_command_object = DbLegacyCommands(command_object) parser = subparsers.add_parser('db_version') parser.set_defaults(action_fn=legacy_command_object.version) parser.set_defaults(action='db_version') parser = subparsers.add_parser('db_upgrade') parser.set_defaults(action_fn=legacy_command_object.upgrade) parser.add_argument('version', nargs='?') parser.set_defaults(action='db_upgrade') parser = subparsers.add_parser('db_version_control') parser.set_defaults(action_fn=legacy_command_object.version_control) parser.add_argument('version', nargs='?') parser.set_defaults(action='db_version_control') parser = subparsers.add_parser('db_sync') parser.set_defaults(action_fn=legacy_command_object.sync) parser.add_argument('version', nargs='?') parser.set_defaults(action='db_sync') parser = subparsers.add_parser('db_expand') parser.set_defaults(action_fn=legacy_command_object.expand) parser.set_defaults(action='db_expand') parser = subparsers.add_parser('db_contract') parser.set_defaults(action_fn=legacy_command_object.contract) parser.set_defaults(action='db_contract') parser = subparsers.add_parser('db_migrate') parser.set_defaults(action_fn=legacy_command_object.migrate) parser.set_defaults(action='db_migrate') parser = subparsers.add_parser('db_check') parser.set_defaults(action_fn=legacy_command_object.check) parser.set_defaults(action='db_check') parser = subparsers.add_parser('db_load_metadefs') parser.set_defaults(action_fn=legacy_command_object.load_metadefs) parser.add_argument('path', nargs='?') parser.add_argument('merge', nargs='?') parser.add_argument('prefer_new', nargs='?') parser.add_argument('overwrite', nargs='?') parser.set_defaults(action='db_load_metadefs') parser = subparsers.add_parser('db_unload_metadefs') parser.set_defaults(action_fn=legacy_command_object.unload_metadefs) parser.set_defaults(action='db_unload_metadefs') parser = subparsers.add_parser('db_export_metadefs') parser.set_defaults(action_fn=legacy_command_object.export_metadefs) parser.add_argument('path', nargs='?') parser.set_defaults(action='db_export_metadefs') def add_command_parsers(subparsers): command_object = DbCommands() parser = subparsers.add_parser('db') 
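# _purge() above sanity-checks its arguments before touching the database:
# age must be non-negative and cannot exceed the number of days since the
# Unix epoch, and -1 is the only allowed negative row limit. The same
# validation as a standalone sketch:
import time

def _validate_purge_args(age_in_days, max_rows):
    if age_in_days < 0:
        raise ValueError('Must supply a non-negative value for age.')
    if age_in_days >= int(time.time()) / 86400:
        raise ValueError('Maximal age is count of days since epoch.')
    if max_rows < -1:
        raise ValueError('Minimal rows limit is -1.')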
parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): # FIXME(basha): hack to assume dest is the arg name without # the leading hyphens if no dest is supplied kwargs.setdefault('dest', args[0][2:]) if kwargs['dest'].startswith('action_kwarg_'): action_kwargs.append( kwargs['dest'][len('action_kwarg_'):]) else: action_kwargs.append(kwargs['dest']) kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) parser.add_argument('action_args', nargs='*') add_legacy_command_parsers(command_object, subparsers) command_opt = cfg.SubCommandOpt('command', title='Commands', help='Available commands', handler=add_command_parsers) CATEGORIES = { 'db': DbCommands, } def methods_of(obj): """Get all callable methods of an object that don't start with underscore returns a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def main(): CONF.register_cli_opt(command_opt) if len(sys.argv) < 2: script_name = sys.argv[0] print("%s category action []" % script_name) print(_("Available categories:")) for category in CATEGORIES: print(_("\t%s") % category) sys.exit(2) try: logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) cfg_files = cfg.find_config_files(project='glance', prog='glance-api') cfg_files.extend(cfg.find_config_files(project='glance', prog='glance-manage')) config.parse_args(default_config_files=cfg_files) config.set_config_defaults() logging.setup(CONF, 'glance') except RuntimeError as e: sys.exit("ERROR: %s" % e) try: if CONF.command.action.startswith('db'): return CONF.command.action_fn() else: func_kwargs = {} for k in CONF.command.action_kwargs: v = getattr(CONF.command, 'action_kwarg_' + k) if v is None: continue if isinstance(v, str): v = encodeutils.safe_decode(v) func_kwargs[k] = v func_args = [encodeutils.safe_decode(arg) for arg in CONF.command.action_args] return CONF.command.action_fn(*func_args, **func_kwargs) except exception.GlanceException as e: sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/replicator.py0000664000175000017500000006571400000000000017605 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2012 Michael Still and Canonical Inc # Copyright 2014 SoftLayer Technologies, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
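# methods_of() above discovers the public callables of a command object with
# dir()/getattr(), skipping anything that starts with an underscore. A
# compact demonstration using a throwaway class:
class _Demo(object):
    def visible(self):
        return 'ok'

    def _hidden(self):
        return 'nope'

def _methods_of(obj):
    return [(name, getattr(obj, name)) for name in dir(obj)
            if callable(getattr(obj, name)) and not name.startswith('_')]

assert [name for name, _fn in _methods_of(_Demo())] == ['visible']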
import http.client as http import os import sys import urllib.parse as urlparse from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import uuidutils from webob import exc from glance.common import config from glance.common import exception from glance.common import utils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) # NOTE: positional arguments will be parsed before until # this bug is corrected https://bugs.launchpad.net/oslo.config/+bug/1392428 cli_opts = [ cfg.IntOpt('chunksize', short='c', default=65536, help="Amount of data to transfer per HTTP write."), cfg.StrOpt('dontreplicate', short='D', default=('created_at date deleted_at location updated_at'), help="List of fields to not replicate."), cfg.BoolOpt('metaonly', short='m', default=False, help="Only replicate metadata, not images."), cfg.StrOpt('token', short='t', default='', help=("Pass in your authentication token if you have " "one. If you use this option the same token is " "used for both the source and the target.")), cfg.StrOpt('command', positional=True, required=False, help="Command to be given to replicator"), cfg.MultiStrOpt('args', positional=True, required=False, help="Arguments for the command"), ] CONF = cfg.CONF CONF.register_cli_opts(cli_opts) CONF.register_opt( cfg.StrOpt('sourcetoken', default='', help=("Pass in your authentication token if you have " "one. This is the token used for the source."))) CONF.register_opt( cfg.StrOpt('targettoken', default='', help=("Pass in your authentication token if you have " "one. This is the token used for the target."))) logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) COMMANDS = """Commands: help Output help for one of the commands below compare What is missing from the target glance? dump Dump the contents of a glance instance to local disk. livecopy Load the contents of one glance instance into another. load Load the contents of a local directory into glance. size Determine the size of a glance instance if dumped to disk. """ IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on ' 'the target, but our check for it did ' 'not find it. This indicates that we ' 'do not have permissions to see all ' 'the images on the target server.') class ImageService(object): def __init__(self, conn, auth_token): """Initialize the ImageService. :param conn: a http.client.HTTPConnection to the glance server :param auth_token: authentication token to pass in the x-auth-token header """ self.auth_token = auth_token self.conn = conn def _http_request(self, method, url, headers, body, ignore_result_body=False): """Perform an HTTP request against the server. 
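# ImageService above funnels every request through one
# http.client.HTTPConnection, so each response body must be fully read
# before the next request is issued on that connection. A hedged sketch of
# that reuse pattern; the host, port and URLs are hypothetical:
import http.client

def _two_gets(host='glance.example.com', port=9292):
    conn = http.client.HTTPConnection(host, port)
    conn.request('GET', '/v1/images/detail')
    resp = conn.getresponse()
    resp.read()  # drain the body, or the next request() call fails
    conn.request('GET', '/v1/images/detail?marker=abc')
    return conn.getresponse()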
method: the HTTP method to use url: the URL to request (not including server portion) headers: headers for the request body: body to send with the request ignore_result_body: the body of the result will be ignored :returns: A http.client response object """ if self.auth_token: headers.setdefault('x-auth-token', self.auth_token) LOG.debug('Request: %(method)s http://%(server)s:%(port)s' '%(url)s with headers %(headers)s', {'method': method, 'server': self.conn.host, 'port': self.conn.port, 'url': url, 'headers': repr(headers)}) self.conn.request(method, url, body, headers) response = self.conn.getresponse() headers = self._header_list_to_dict(response.getheaders()) code = response.status code_description = http.responses[code] LOG.debug('Response: %(code)s %(status)s %(headers)s', {'code': code, 'status': code_description, 'headers': repr(headers)}) if code == http.BAD_REQUEST: raise exc.HTTPBadRequest( explanation=response.read()) if code == http.INTERNAL_SERVER_ERROR: raise exc.HTTPInternalServerError( explanation=response.read()) if code == http.UNAUTHORIZED: raise exc.HTTPUnauthorized( explanation=response.read()) if code == http.FORBIDDEN: raise exc.HTTPForbidden( explanation=response.read()) if code == http.CONFLICT: raise exc.HTTPConflict( explanation=response.read()) if ignore_result_body: # NOTE: because we are pipelining requests through a single HTTP # connection, http.client requires that we read the response body # before we can make another request. If the caller knows they # don't care about the body, they can ask us to do that for them. response.read() return response def get_images(self): """Return a detailed list of images. Yields a series of images as dicts containing metadata. """ params = {'is_public': None} while True: url = '/v1/images/detail' query = urlparse.urlencode(params) if query: url += '?%s' % query response = self._http_request('GET', url, {}, '') result = jsonutils.loads(response.read()) if not result or 'images' not in result or not result['images']: return for image in result.get('images', []): params['marker'] = image['id'] yield image def get_image(self, image_uuid): """Fetch image data from glance. image_uuid: the id of an image :returns: a http.client Response object where the body is the image. """ url = '/v1/images/%s' % image_uuid return self._http_request('GET', url, {}, '') @staticmethod def _header_list_to_dict(headers): """Expand a list of headers into a dictionary. headers: a list of [(key, value), (key, value), (key, value)] Returns: a dictionary representation of the list """ d = {} for (header, value) in headers: if header.startswith('x-image-meta-property-'): prop = header.replace('x-image-meta-property-', '') d.setdefault('properties', {}) d['properties'][prop] = value else: d[header.replace('x-image-meta-', '')] = value return d def get_image_meta(self, image_uuid): """Return the metadata for a single image. image_uuid: the id of an image Returns: image metadata as a dictionary """ url = '/v1/images/%s' % image_uuid response = self._http_request('HEAD', url, {}, '', ignore_result_body=True) return self._header_list_to_dict(response.getheaders()) @staticmethod def _dict_to_headers(d): """Convert a dictionary into one suitable for a HTTP request. 
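# _header_list_to_dict() above folds v1 image headers back into a metadata
# dict, routing x-image-meta-property-* keys into a nested 'properties'
# mapping. A runnable demonstration with sample headers:
def _headers_to_meta(headers):
    d = {}
    for (header, value) in headers:
        if header.startswith('x-image-meta-property-'):
            prop = header.replace('x-image-meta-property-', '')
            d.setdefault('properties', {})
            d['properties'][prop] = value
        else:
            d[header.replace('x-image-meta-', '')] = value
    return d

assert _headers_to_meta([('x-image-meta-name', 'cirros'),
                         ('x-image-meta-property-os', 'linux')]) == {
    'name': 'cirros', 'properties': {'os': 'linux'}}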
d: a dictionary Returns: the same dictionary, with x-image-meta added to every key """ h = {} for key in d: if key == 'properties': for subkey in d[key]: if d[key][subkey] is None: h['x-image-meta-property-%s' % subkey] = '' else: h['x-image-meta-property-%s' % subkey] = d[key][subkey] else: h['x-image-meta-%s' % key] = d[key] return h def add_image(self, image_meta, image_data): """Upload an image. image_meta: image metadata as a dictionary image_data: image data as a object with a read() method Returns: a tuple of (http response headers, http response body) """ url = '/v1/images' headers = self._dict_to_headers(image_meta) headers['Content-Type'] = 'application/octet-stream' headers['Content-Length'] = int(image_meta['size']) response = self._http_request('POST', url, headers, image_data) headers = self._header_list_to_dict(response.getheaders()) LOG.debug('Image post done') body = response.read() return headers, body def add_image_meta(self, image_meta): """Update image metadata. image_meta: image metadata as a dictionary Returns: a tuple of (http response headers, http response body) """ url = '/v1/images/%s' % image_meta['id'] headers = self._dict_to_headers(image_meta) headers['Content-Type'] = 'application/octet-stream' response = self._http_request('PUT', url, headers, '') headers = self._header_list_to_dict(response.getheaders()) LOG.debug('Image post done') body = response.read() return headers, body def get_image_service(): """Get a copy of the image service. This is done like this to make it easier to mock out ImageService. """ return ImageService def _human_readable_size(num, suffix='B'): for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return "%3.1f %s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f %s%s" % (num, 'Yi', suffix) def replication_size(options, args): """%(prog)s size Determine the size of a glance instance if dumped to disk. server:port: the location of the glance instance. """ # Make sure server info is provided if args is None or len(args) < 1: raise TypeError(_("Too few arguments.")) server, port = utils.parse_valid_host_port(args.pop()) total_size = 0 count = 0 imageservice = get_image_service() client = imageservice(http.HTTPConnection(server, port), options.targettoken) for image in client.get_images(): LOG.debug('Considering image: %(image)s', {'image': image}) if image['status'] == 'active': total_size += int(image['size']) count += 1 print(_('Total size is %(size)d bytes (%(human_size)s) across ' '%(img_count)d images') % {'size': total_size, 'human_size': _human_readable_size(total_size), 'img_count': count}) def replication_dump(options, args): """%(prog)s dump Dump the contents of a glance instance to local disk. server:port: the location of the glance instance. path: a directory on disk to contain the data. 
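# _human_readable_size() above repeatedly divides by 1024 and attaches the
# matching binary prefix. A quick, runnable check of its behavior:
def _hrs(num, suffix='B'):
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f %s%s" % (num, 'Yi', suffix)

assert _hrs(512) == '512.0 B'
assert _hrs(4096) == '4.0 KiB'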
""" # Make sure server and path are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) path = args.pop() server, port = utils.parse_valid_host_port(args.pop()) imageservice = get_image_service() client = imageservice(http.HTTPConnection(server, port), options.sourcetoken) for image in client.get_images(): LOG.debug('Considering: %(image_id)s (%(image_name)s) ' '(%(image_size)d bytes)', {'image_id': image['id'], 'image_name': image.get('name', '--unnamed--'), 'image_size': image['size']}) data_path = os.path.join(path, image['id']) data_filename = data_path + '.img' if not os.path.exists(data_path): LOG.info(_LI('Storing: %(image_id)s (%(image_name)s)' ' (%(image_size)d bytes) in %(data_filename)s'), {'image_id': image['id'], 'image_name': image.get('name', '--unnamed--'), 'image_size': image['size'], 'data_filename': data_filename}) # Dump glance information with open(data_path, 'w', encoding='utf-8') as f: f.write(jsonutils.dumps(image)) if image['status'] == 'active' and not options.metaonly: # Now fetch the image. The metadata returned in headers here # is the same as that which we got from the detailed images # request earlier, so we can ignore it here. Note that we also # only dump active images. LOG.debug('Image %s is active', image['id']) image_response = client.get_image(image['id']) with open(data_filename, 'wb') as f: while True: chunk = image_response.read(options.chunksize) if not chunk: break f.write(chunk) def _dict_diff(a, b): """A one way dictionary diff. a: a dictionary b: a dictionary Returns: True if the dictionaries are different """ # Only things the source has which the target lacks matter if set(a.keys()) - set(b.keys()): LOG.debug('metadata diff -- source has extra keys: %(keys)s', {'keys': ' '.join(set(a.keys()) - set(b.keys()))}) return True for key in a: if str(a[key]) != str(b[key]): LOG.debug('metadata diff -- value differs for key ' '%(key)s: source "%(source_value)s" vs ' 'target "%(target_value)s"', {'key': key, 'source_value': a[key], 'target_value': b[key]}) return True return False def replication_load(options, args): """%(prog)s load Load the contents of a local directory into glance. server:port: the location of the glance instance. path: a directory on disk containing the data. """ # Make sure server and path are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) path = args.pop() server, port = utils.parse_valid_host_port(args.pop()) imageservice = get_image_service() client = imageservice(http.HTTPConnection(server, port), options.targettoken) updated = [] for ent in os.listdir(path): if uuidutils.is_uuid_like(ent): image_uuid = ent LOG.info(_LI('Considering: %s'), image_uuid) meta_file_name = os.path.join(path, image_uuid) with open(meta_file_name) as meta_file: meta = jsonutils.loads(meta_file.read()) # Remove keys which don't make sense for replication for key in options.dontreplicate.split(' '): if key in meta: LOG.debug('Stripping %(header)s from saved ' 'metadata', {'header': key}) del meta[key] if _image_present(client, image_uuid): # NOTE(mikal): Perhaps we just need to update the metadata? # Note that we don't attempt to change an image file once it # has been uploaded. 
LOG.debug('Image %s already present', image_uuid) headers = client.get_image_meta(image_uuid) for key in options.dontreplicate.split(' '): if key in headers: LOG.debug('Stripping %(header)s from target ' 'metadata', {'header': key}) del headers[key] if _dict_diff(meta, headers): LOG.info(_LI('Image %s metadata has changed'), image_uuid) headers, body = client.add_image_meta(meta) _check_upload_response_headers(headers, body) updated.append(meta['id']) else: if not os.path.exists(os.path.join(path, image_uuid + '.img')): LOG.debug('%s dump is missing image data, skipping', image_uuid) continue # Upload the image itself with open(os.path.join(path, image_uuid + '.img')) as img_file: try: headers, body = client.add_image(meta, img_file) _check_upload_response_headers(headers, body) updated.append(meta['id']) except exc.HTTPConflict: # NOTE(tkajinam): noqa does not work with multi-line, # so split this interpolation to a separate line msg = _LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image_uuid LOG.error(msg) return updated def replication_livecopy(options, args): """%(prog)s livecopy Load the contents of one glance instance into another. fromserver:port: the location of the source glance instance. toserver:port: the location of the target glance instance. """ # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) imageservice = get_image_service() target_server, target_port = utils.parse_valid_host_port(args.pop()) target_conn = http.HTTPConnection(target_server, target_port) target_client = imageservice(target_conn, options.targettoken) source_server, source_port = utils.parse_valid_host_port(args.pop()) source_conn = http.HTTPConnection(source_server, source_port) source_client = imageservice(source_conn, options.sourcetoken) updated = [] for image in source_client.get_images(): LOG.debug('Considering %(id)s', {'id': image['id']}) for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from source metadata', {'header': key}) del image[key] if _image_present(target_client, image['id']): # NOTE(mikal): Perhaps we just need to update the metadata? # Note that we don't attempt to change an image file once it # has been uploaded. 
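# NOTE: for illustration, assuming a hypothetical dontreplicate value of
# 'created_at updated_at', a source image such as
# {'id': 'abc', 'size': 10, 'updated_at': 'T2'} is reduced to
# {'id': 'abc', 'size': 10} before the metadata comparison below, so
# timestamp-only changes do not trigger a metadata update on the target.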
headers = target_client.get_image_meta(image['id']) if headers['status'] == 'active': for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from source ' 'metadata', {'header': key}) del image[key] if key in headers: LOG.debug('Stripping %(header)s from target ' 'metadata', {'header': key}) del headers[key] if _dict_diff(image, headers): LOG.info(_LI('Image %(image_id)s (%(image_name)s) ' 'metadata has changed'), {'image_id': image['id'], 'image_name': image.get('name', '--unnamed--')}) headers, body = target_client.add_image_meta(image) _check_upload_response_headers(headers, body) updated.append(image['id']) elif image['status'] == 'active': LOG.info(_LI('Image %(image_id)s (%(image_name)s) ' '(%(image_size)d bytes) ' 'is being synced'), {'image_id': image['id'], 'image_name': image.get('name', '--unnamed--'), 'image_size': image['size']}) if not options.metaonly: image_response = source_client.get_image(image['id']) try: headers, body = target_client.add_image(image, image_response) _check_upload_response_headers(headers, body) updated.append(image['id']) except exc.HTTPConflict: LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id']) # noqa return updated def replication_compare(options, args): """%(prog)s compare Compare the contents of fromserver with those of toserver. fromserver:port: the location of the source glance instance. toserver:port: the location of the target glance instance. """ # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) imageservice = get_image_service() target_server, target_port = utils.parse_valid_host_port(args.pop()) target_conn = http.HTTPConnection(target_server, target_port) target_client = imageservice(target_conn, options.targettoken) source_server, source_port = utils.parse_valid_host_port(args.pop()) source_conn = http.HTTPConnection(source_server, source_port) source_client = imageservice(source_conn, options.sourcetoken) differences = {} for image in source_client.get_images(): if _image_present(target_client, image['id']): headers = target_client.get_image_meta(image['id']) for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from source metadata', {'header': key}) del image[key] if key in headers: LOG.debug('Stripping %(header)s from target metadata', {'header': key}) del headers[key] for key in image: if image[key] != headers.get(key): LOG.warning(_LW('%(image_id)s: field %(key)s differs ' '(source is %(source_value)s, destination ' 'is %(target_value)s)'), {'image_id': image['id'], 'key': key, 'source_value': image[key], 'target_value': headers.get(key, 'undefined')}) differences[image['id']] = 'diff' else: LOG.debug('%(image_id)s is identical', {'image_id': image['id']}) elif image['status'] == 'active': LOG.warning(_LW('Image %(image_id)s ("%(image_name)s") ' 'entirely missing from the destination'), {'image_id': image['id'], 'image_name': image.get('name', '--unnamed')}) differences[image['id']] = 'missing' return differences def _check_upload_response_headers(headers, body): """Check that the headers of an upload are reasonable. headers: the headers from the upload body: the body from the upload """ if 'status' not in headers: try: d = jsonutils.loads(body) if 'image' in d and 'status' in d['image']: return except Exception: raise exception.UploadException(body) def _image_present(client, image_uuid): """Check if an image is present in glance. 
client: the ImageService image_uuid: the image uuid to check Returns: True if the image is present """ headers = client.get_image_meta(image_uuid) return 'status' in headers def print_help(options, args): """Print help specific to a command. options: the parsed command line options args: the command line """ if not args: print(COMMANDS) else: command_name = args.pop() command = lookup_command(command_name) print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) def lookup_command(command_name): """Lookup a command. command_name: the command name Returns: a method which implements that command """ BASE_COMMANDS = {'help': print_help} REPLICATION_COMMANDS = {'compare': replication_compare, 'dump': replication_dump, 'livecopy': replication_livecopy, 'load': replication_load, 'size': replication_size} commands = {} for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS): commands.update(command_set) try: command = commands[command_name] except KeyError: if command_name: sys.exit(_("Unknown command: %s") % command_name) else: command = commands['help'] return command def main(): """The main function.""" try: config.parse_args() except RuntimeError as e: sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) except SystemExit: sys.exit("Please specify one command") # Setup logging logging.setup(CONF, 'glance') if CONF.token: CONF.sourcetoken = CONF.token CONF.targettoken = CONF.token command = lookup_command(CONF.command) try: command(CONF, CONF.args) except TypeError as e: LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) except ValueError as e: LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/scrubber.py0000664000175000017500000001372400000000000017242 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Scrub Service """ import os import sys import eventlet # NOTE(jokke): As per the eventlet commit # b756447bab51046dfc6f1e0e299cc997ab343701 there's circular import happening # which can be solved making sure the hubs are properly and fully imported # before calling monkey_patch(). This is solved in eventlet 0.22.0 but we # need to address it before that is widely used around. eventlet.hubs.get_hub() if os.name == 'nt': # eventlet monkey patching the os module causes subprocess.Popen to fail # on Windows when using pipes due to missing non-blocking IO support. eventlet.patcher.monkey_patch(os=False) else: eventlet.patcher.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. 
See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading import threading orig_threading.current_thread.__globals__['_active'] = threading._active import subprocess # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) import glance_store from os_win import utilsfactory as os_win_utilsfactory from oslo_config import cfg from oslo_log import log as logging from glance.common import config from glance.common import exception from glance import scrubber CONF = cfg.CONF logging.register_options(CONF) CONF.set_default(name='use_stderr', default=True) def main(): # Used on Windows, ensuring that a single scrubber can run at a time. mutex = None mutex_acquired = False try: if os.name == 'nt': # We can't rely on process names on Windows as there may be # wrappers with the same name. mutex = os_win_utilsfactory.get_mutex( name='Global\\glance-scrubber') mutex_acquired = mutex.acquire(timeout_ms=0) CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts) CONF.register_opts(scrubber.scrubber_cmd_opts) config.parse_args() logging.setup(CONF, 'glance') CONF.import_opt('enabled_backends', 'glance.common.wsgi') if CONF.enabled_backends: glance_store.register_store_opts(CONF) glance_store.create_multi_stores(CONF) glance_store.verify_store() else: glance_store.register_opts(CONF) glance_store.create_stores(CONF) glance_store.verify_default_store() if CONF.restore and CONF.daemon: sys.exit("ERROR: The restore and daemon options should not be set " "together. Please use either of them in one request.") app = scrubber.Scrubber(glance_store) if CONF.restore: if os.name == 'nt': scrubber_already_running = not mutex_acquired else: scrubber_already_running = scrubber_already_running_posix() if scrubber_already_running: already_running_msg = ( "ERROR: glance-scrubber is already running. " "Please ensure that the daemon is stopped.") sys.exit(already_running_msg) app.revert_image_status(CONF.restore) elif CONF.daemon: server = scrubber.Daemon(CONF.wakeup_time) server.start(app) server.wait() else: app.run() except (exception.ImageNotFound, exception.Conflict) as e: sys.exit("ERROR: %s" % e) except RuntimeError as e: sys.exit("ERROR: %s" % e) finally: if mutex and mutex_acquired: mutex.release() def scrubber_already_running_posix(): # Check whether a glance-scrubber process is already running. # 1. Try to find the pid file if scrubber is controlled by # glance-control # 2. Try to check the process name. pid_file = '/var/run/glance/glance-scrubber.pid' if os.path.exists(os.path.abspath(pid_file)): return True for glance_scrubber_name in ['glance-scrubber', 'glance.cmd.scrubber']: cmd = subprocess.Popen( ['/usr/bin/pgrep', '-f', glance_scrubber_name], stdout=subprocess.PIPE, shell=False) pids, _ = cmd.communicate() # The response format of subprocess.Popen.communicate() is # different between py2 and py3. It's "string" in py2, but # "bytes" in py3. if isinstance(pids, bytes): pids = pids.decode() self_pid = os.getpid() if pids.count('\n') > 1 and str(self_pid) in pids: # One process is self, so if the process number is > 1, it # means that another glance-scrubber process is running.
return True elif pids.count('\n') > 0 and str(self_pid) not in pids: # If self is not in the result and the number of pids is still # > 0, it means that another glance-scrubber process is # running. return True return False if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/cmd/status.py0000664000175000017500000000555400000000000016754 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import glance_store from oslo_config import cfg from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from glance.common import removed_config from glance.common import wsgi # noqa CONF = cfg.CONF SUCCESS = upgradecheck.Code.SUCCESS FAILURE = upgradecheck.Code.FAILURE class Checks(upgradecheck.UpgradeCommands): """Programmable upgrade checks.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) removed_config.register_removed_options() def _check_sheepdog_store(self): """Check that the removed sheepdog backend store is not configured.""" glance_store.register_opts(CONF) sheepdog_present = False if 'sheepdog' in (getattr(CONF, 'enabled_backends') or {}): sheepdog_present = True if 'sheepdog' in (getattr(CONF.glance_store, 'stores') or []): sheepdog_present = True if sheepdog_present: return upgradecheck.Result( FAILURE, 'The "sheepdog" backend store driver has been removed, but ' 'current settings have it configured.') return upgradecheck.Result(SUCCESS) def _check_owner_is_tenant(self): if CONF.owner_is_tenant is False: return upgradecheck.Result( FAILURE, 'The "owner_is_tenant" option has been removed and there is ' 'no upgrade path for installations that had this option set ' 'to False.') return upgradecheck.Result(SUCCESS) _upgrade_checks = ( # Added in Ussuri ('Sheepdog Driver Removal', _check_sheepdog_store), # Added in Wallaby ('Policy File JSON to YAML Migration', (common_checks.check_policy_json, {'conf': CONF})), # Removed in Wallaby ('Config option owner_is_tenant removal', _check_owner_is_tenant), ) def main(): try: return upgradecheck.main(CONF, 'glance', Checks()) except cfg.ConfigDirNotFoundError: return ('ERROR: cannot read the glance configuration directory.\n' 'Please re-run using the --config-dir option ' 'with a valid glance configuration directory.') if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.858303 glance-29.0.0/glance/common/0000775000175000017500000000000000000000000015577 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/__init__.py0000664000175000017500000000000000000000000017676 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
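# NOTE: a minimal sketch of how a further upgrade check could be added to
# the Checks class above; the option name 'my_removed_opt' and the check
# title are hypothetical, but upgradecheck.Result(code, details) is the
# same oslo.upgradecheck interface used by the existing checks:
#
#     def _check_my_removed_opt(self):
#         if getattr(CONF, 'my_removed_opt', None):
#             return upgradecheck.Result(
#                 FAILURE, 'The "my_removed_opt" option has been removed.')
#         return upgradecheck.Result(SUCCESS)
#
# and then register it by appending
# ('My removed opt check', _check_my_removed_opt) to _upgrade_checks.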
glance-29.0.0/glance/common/auth.py0000664000175000017500000002744400000000000017125 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This auth module is intended to allow OpenStack client-tools to select from a variety of authentication strategies, including NoAuth (the default), and Keystone (an identity management system). :: > auth_plugin = AuthPlugin(creds) > auth_plugin.authenticate() > auth_plugin.auth_token abcdefg > auth_plugin.management_url http://service_endpoint/ """ import http.client as http import urllib.parse as urlparse import httplib2 from keystoneclient import service_catalog as ks_service_catalog from oslo_serialization import jsonutils from glance.common import exception from glance.i18n import _ class BaseStrategy(object): def __init__(self): self.auth_token = None # TODO(sirp): Should expose selecting public/internal/admin URL. self.management_url = None def authenticate(self): raise NotImplementedError @property def is_authenticated(self): raise NotImplementedError @property def strategy(self): raise NotImplementedError class NoAuthStrategy(BaseStrategy): def authenticate(self): pass @property def is_authenticated(self): return True @property def strategy(self): return 'noauth' class KeystoneStrategy(BaseStrategy): MAX_REDIRECTS = 10 def __init__(self, creds, insecure=False, configure_via_auth=True): self.creds = creds self.insecure = insecure self.configure_via_auth = configure_via_auth super(KeystoneStrategy, self).__init__() def check_auth_params(self): # Ensure that supplied credential parameters are as required for required in ('username', 'password', 'auth_url', 'strategy'): if self.creds.get(required) is None: raise exception.MissingCredentialError(required=required) if self.creds['strategy'] != 'keystone': raise exception.BadAuthStrategy(expected='keystone', received=self.creds['strategy']) # For v2.0 also check tenant is present if self.creds['auth_url'].rstrip('/').endswith('v2.0'): if self.creds.get("tenant") is None: raise exception.MissingCredentialError(required='tenant') # For v3 also check project is present if self.creds['auth_url'].rstrip('/').endswith('v3'): if self.creds.get("project") is None: raise exception.MissingCredentialError(required='project') def authenticate(self): """Authenticate with the Keystone service. There are a few scenarios to consider here: 1. Which version of Keystone are we using? v1 which uses headers to pass the credentials, or v2 which uses a JSON encoded request body? 2. Keystone may respond back with a redirection using a 305 status code. 3. We may attempt a v1 auth when v2 is what's called for. In this case, we rewrite the url to contain /v2.0/ and retry using the v2 protocol. """ def _authenticate(auth_url): # If OS_AUTH_URL is missing a trailing slash add one if not auth_url.endswith('/'): auth_url += '/' token_url = urlparse.urljoin(auth_url, "tokens") # 1. 
Check Keystone version is_v2 = auth_url.rstrip('/').endswith('v2.0') is_v3 = auth_url.rstrip('/').endswith('v3') if is_v3: token_url = urlparse.urljoin(auth_url, "auth/tokens") self._v3_auth(token_url) elif is_v2: self._v2_auth(token_url) else: self._v1_auth(token_url) self.check_auth_params() auth_url = self.creds['auth_url'] for redirect_iter in range(self.MAX_REDIRECTS): try: _authenticate(auth_url) except exception.AuthorizationRedirect as e: # 2. Keystone may redirect us auth_url = e.url except exception.AuthorizationFailure: # 3. In some configurations nova makes redirection to # v2.0 keystone endpoint. Also, new location does not # contain real endpoint, only hostname and port. if 'v2.0' not in auth_url: auth_url = urlparse.urljoin(auth_url, 'v2.0/') else: # If we successfully auth'd, then memorize the correct auth_url # for future use. self.creds['auth_url'] = auth_url break else: # Guard against a redirection loop raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS) def _v1_auth(self, token_url): creds = self.creds headers = { 'X-Auth-User': creds['username'], 'X-Auth-Key': creds['password'] } tenant = creds.get('tenant') if tenant: headers['X-Auth-Tenant'] = tenant resp, resp_body = self._do_request(token_url, 'GET', headers=headers) def _management_url(self, resp): for url_header in ('x-image-management-url', 'x-server-management-url', 'x-glance'): try: return resp[url_header] except KeyError as e: not_found = e raise not_found if resp.status in (http.OK, http.NO_CONTENT): try: if self.configure_via_auth: self.management_url = _management_url(self, resp) self.auth_token = resp['x-auth-token'] except KeyError: raise exception.AuthorizationFailure() elif resp.status == http.USE_PROXY: raise exception.AuthorizationRedirect(uri=resp['location']) elif resp.status == http.BAD_REQUEST: raise exception.AuthBadRequest(url=token_url) elif resp.status == http.UNAUTHORIZED: raise exception.NotAuthenticated() elif resp.status == http.NOT_FOUND: raise exception.AuthUrlNotFound(url=token_url) else: raise Exception(_('Unexpected response: %s') % resp.status) def _v3_auth(self, token_url): creds = { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": self.creds['username'], "domain": {"id": self.creds['user_domain_id']}, "password": self.creds['password'] } } }, "scope": { "project": { "name": self.creds['project'], "domain": { "id": self.creds['project_domain_id'] } } } } } headers = {'Content-Type': 'application/json'} req_body = jsonutils.dumps(creds) resp, resp_body = self._do_request( token_url, 'POST', headers=headers, body=req_body) resp_body = jsonutils.loads(resp_body) if resp.status == 201: resp_auth = resp['x-subject-token'] creds_region = self.creds.get('region') if self.configure_via_auth: endpoint = get_endpoint(resp_body['token']['catalog'], endpoint_region=creds_region) self.management_url = endpoint self.auth_token = resp_auth elif resp.status == 305: raise exception.RedirectException(resp['location']) elif resp.status == 400: raise exception.AuthBadRequest(url=token_url) elif resp.status == 401: raise exception.NotAuthenticated() elif resp.status == 404: raise exception.AuthUrlNotFound(url=token_url) else: raise Exception(_('Unexpected response: %s') % resp.status) def _v2_auth(self, token_url): creds = self.creds creds = { "auth": { "tenantName": creds['tenant'], "passwordCredentials": { "username": creds['username'], "password": creds['password'] } } } headers = {'Content-Type': 'application/json'} req_body = jsonutils.dumps(creds) resp, resp_body = self._do_request( token_url, 'POST', headers=headers, body=req_body) if resp.status == http.OK:
resp_auth = jsonutils.loads(resp_body)['access'] creds_region = self.creds.get('region') if self.configure_via_auth: endpoint = get_endpoint(resp_auth['serviceCatalog'], endpoint_region=creds_region) self.management_url = endpoint self.auth_token = resp_auth['token']['id'] elif resp.status == http.USE_PROXY: raise exception.RedirectException(resp['location']) elif resp.status == http.BAD_REQUEST: raise exception.AuthBadRequest(url=token_url) elif resp.status == http.UNAUTHORIZED: raise exception.NotAuthenticated() elif resp.status == http.NOT_FOUND: raise exception.AuthUrlNotFound(url=token_url) else: raise Exception(_('Unexpected response: %s') % resp.status) @property def is_authenticated(self): return self.auth_token is not None @property def strategy(self): return 'keystone' def _do_request(self, url, method, headers=None, body=None): headers = headers or {} conn = httplib2.Http() conn.force_exception_to_status_code = True conn.disable_ssl_certificate_validation = self.insecure headers['User-Agent'] = 'glance-client' resp, resp_body = conn.request(url, method, headers=headers, body=body) return resp, resp_body def get_plugin_from_strategy(strategy, creds=None, insecure=False, configure_via_auth=True): if strategy == 'noauth': return NoAuthStrategy() elif strategy == 'keystone': return KeystoneStrategy(creds, insecure, configure_via_auth=configure_via_auth) else: raise Exception(_("Unknown auth strategy '%s'") % strategy) def get_endpoint(service_catalog, service_type='image', endpoint_region=None, endpoint_type='publicURL'): """ Select an endpoint from the service catalog We search the full service catalog for services matching both type and region. If the client supplied no region then any 'image' endpoint is considered a match. There must be one -- and only one -- successful match in the catalog, otherwise we will raise an exception. """ endpoints = ks_service_catalog.ServiceCatalogV2( {'serviceCatalog': service_catalog} ).get_urls(service_type=service_type, region_name=endpoint_region, endpoint_type=endpoint_type) if endpoints is None: raise exception.NoServiceEndpoint() elif len(endpoints) == 1: return endpoints[0] else: raise exception.RegionAmbiguity(region=endpoint_region) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/client.py0000664000175000017500000005177400000000000017445 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
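# NOTE: for illustration, the auth module above is typically driven like
# this (all credential values are placeholders):
#
#     creds = {'strategy': 'keystone', 'username': 'demo',
#              'password': 'secret', 'tenant': 'demo',
#              'auth_url': 'http://keystone:5000/v2.0'}
#     plugin = auth.get_plugin_from_strategy('keystone', creds)
#     plugin.authenticate()
#     token = plugin.auth_token   # then passed as the 'x-auth-token' header
#
# check_auth_params() will reject the credentials if 'tenant' (for a v2.0
# auth_url) or 'project' (for a v3 auth_url) is missing.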
# HTTPSClientAuthConnection code comes courtesy of ActiveState website: # http://code.activestate.com/recipes/ # 577548-https-httplib-client-connection-with-certificate-v/ import collections.abc import copy import functools import http.client import os import re import urllib.parse as urlparse try: from eventlet.green import socket from eventlet.green import ssl except ImportError: import socket import ssl import osprofiler.web from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import netutils from glance.common import auth from glance.common import exception from glance.common import utils from glance.i18n import _ LOG = logging.getLogger(__name__) # common chunk size for get and put CHUNKSIZE = 65536 VERSION_REGEX = re.compile(r"/?v[0-9\.]+") def handle_unauthenticated(func): """ Wrap a function to re-authenticate and retry. """ @functools.wraps(func) def wrapped(self, *args, **kwargs): try: return func(self, *args, **kwargs) except exception.NotAuthenticated: self._authenticate(force_reauth=True) return func(self, *args, **kwargs) return wrapped def handle_redirects(func): """ Wrap the _do_request function to handle HTTP redirects. """ MAX_REDIRECTS = 5 @functools.wraps(func) def wrapped(self, method, url, body, headers): for i in range(MAX_REDIRECTS): try: return func(self, method, url, body, headers) except exception.RedirectException as redirect: if redirect.url is None: raise exception.InvalidRedirect() url = redirect.url raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS) return wrapped class HTTPSClientAuthConnection(http.client.HTTPSConnection): """ Class to make a HTTPS connection, with support for full client-based SSL Authentication :see http://code.activestate.com/recipes/ 577548-https-httplib-client-connection-with-certificate-v/ """ def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None, insecure=False): http.client.HTTPSConnection.__init__(self, host, port, key_file=key_file, cert_file=cert_file) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file self.timeout = timeout self.insecure = insecure def connect(self): """ Connect to a host on a given (SSL) port. If ca_file is pointing somewhere, use it to check Server Certificate. Redefined/copied and extended from httplib.py:1105 (Python 2.6.x). This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to ssl.wrap_socket(), which forces SSL to check server certificate against our client certificate. 
""" sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() # Check CA file unless 'insecure' is specified if self.insecure is True: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=ssl.CERT_NONE) else: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ca_certs=self.ca_file, cert_reqs=ssl.CERT_REQUIRED) class BaseClient(object): """A base client class""" DEFAULT_PORT = 80 DEFAULT_DOC_ROOT = None # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, # Suse, FreeBSD/OpenBSD DEFAULT_CA_FILE_PATH = ('/etc/ssl/certs/ca-certificates.crt:' '/etc/pki/tls/certs/ca-bundle.crt:' '/etc/ssl/ca-bundle.pem:' '/etc/ssl/cert.pem') OK_RESPONSE_CODES = ( http.client.OK, http.client.CREATED, http.client.ACCEPTED, http.client.NO_CONTENT, ) REDIRECT_RESPONSE_CODES = ( http.client.MOVED_PERMANENTLY, http.client.FOUND, http.client.SEE_OTHER, http.client.USE_PROXY, http.client.TEMPORARY_REDIRECT, ) def __init__(self, host, port=None, timeout=None, use_ssl=False, auth_token=None, creds=None, doc_root=None, key_file=None, cert_file=None, ca_file=None, insecure=False, configure_via_auth=True): """ Creates a new client to some service. :param host: The host where service resides :param port: The port where service resides :param timeout: Connection timeout. :param use_ssl: Should we use HTTPS? :param auth_token: The auth token to pass to the server :param creds: The credentials to pass to the auth plugin :param doc_root: Prefix for all URLs we request from host :param key_file: Optional PEM-formatted file that contains the private key. If use_ssl is True, and this param is None (the default), then an environ variable GLANCE_CLIENT_KEY_FILE is looked for. If no such environ variable is found, ClientConnectionError will be raised. :param cert_file: Optional PEM-formatted certificate chain file. If use_ssl is True, and this param is None (the default), then an environ variable GLANCE_CLIENT_CERT_FILE is looked for. If no such environ variable is found, ClientConnectionError will be raised. :param ca_file: Optional CA cert file to use in SSL connections If use_ssl is True, and this param is None (the default), then an environ variable GLANCE_CLIENT_CA_FILE is looked for. :param insecure: Optional. If set then the server's certificate will not be verified. :param configure_via_auth: Optional. Defaults to True. If set, the URL returned from the service catalog for the image endpoint will **override** the URL supplied to in the host parameter. """ self.host = host self.port = port or self.DEFAULT_PORT self.timeout = timeout # A value of '0' implies never timeout if timeout == 0: self.timeout = None self.use_ssl = use_ssl self.auth_token = auth_token self.creds = creds or {} self.connection = None self.configure_via_auth = configure_via_auth # doc_root can be a nullstring, which is valid, and why we # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below. 
self.doc_root = (doc_root if doc_root is not None else self.DEFAULT_DOC_ROOT) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file self.insecure = insecure self.auth_plugin = self.make_auth_plugin(self.creds, self.insecure) self.connect_kwargs = self.get_connect_kwargs() def get_connect_kwargs(self): # Both secure and insecure connections have a timeout option connect_kwargs = {'timeout': self.timeout} if self.use_ssl: if self.key_file is None: self.key_file = os.environ.get('GLANCE_CLIENT_KEY_FILE') if self.cert_file is None: self.cert_file = os.environ.get('GLANCE_CLIENT_CERT_FILE') if self.ca_file is None: self.ca_file = os.environ.get('GLANCE_CLIENT_CA_FILE') # Check that key_file/cert_file are either both set or both unset if self.cert_file is not None and self.key_file is None: msg = _("You have selected to use SSL in connecting, " "and you have supplied a cert, " "however you have failed to supply either a " "key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable") raise exception.ClientConnectionError(msg) if self.key_file is not None and self.cert_file is None: msg = _("You have selected to use SSL in connecting, " "and you have supplied a key, " "however you have failed to supply either a " "cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable") raise exception.ClientConnectionError(msg) if (self.key_file is not None and not os.path.exists(self.key_file)): msg = _("The key file you specified %s does not " "exist") % self.key_file raise exception.ClientConnectionError(msg) connect_kwargs['key_file'] = self.key_file if (self.cert_file is not None and not os.path.exists(self.cert_file)): msg = _("The cert file you specified %s does not " "exist") % self.cert_file raise exception.ClientConnectionError(msg) connect_kwargs['cert_file'] = self.cert_file if (self.ca_file is not None and not os.path.exists(self.ca_file)): msg = _("The CA file you specified %s does not " "exist") % self.ca_file raise exception.ClientConnectionError(msg) if self.ca_file is None: for ca in self.DEFAULT_CA_FILE_PATH.split(":"): if os.path.exists(ca): self.ca_file = ca break connect_kwargs['ca_file'] = self.ca_file connect_kwargs['insecure'] = self.insecure return connect_kwargs def configure_from_url(self, url): """ Sets up the connection based on the given url. The form is:: <protocol>://<server>:<port>/doc_root """ LOG.debug("Configuring from URL: %s", url) parsed = urlparse.urlparse(url) self.use_ssl = parsed.scheme == 'https' self.host = parsed.hostname self.port = parsed.port or 80 self.doc_root = parsed.path.rstrip('/') # We need to ensure a version identifier is appended to the doc_root if not VERSION_REGEX.match(self.doc_root): if self.DEFAULT_DOC_ROOT: doc_root = self.DEFAULT_DOC_ROOT.lstrip('/') self.doc_root += '/' + doc_root LOG.debug("Appending doc_root %(doc_root)s to URL %(url)s", {'doc_root': doc_root, 'url': url}) # ensure connection kwargs are re-evaluated after the service catalog # publicURL is parsed for potential SSL usage self.connect_kwargs = self.get_connect_kwargs() def make_auth_plugin(self, creds, insecure): """ Returns an instantiated authentication plugin.
""" strategy = creds.get('strategy', 'noauth') plugin = auth.get_plugin_from_strategy(strategy, creds, insecure, self.configure_via_auth) return plugin def get_connection_type(self): """ Returns the proper connection type """ if self.use_ssl: return HTTPSClientAuthConnection else: return http.client.HTTPConnection def _authenticate(self, force_reauth=False): """ Use the authentication plugin to authenticate and set the auth token. :param force_reauth: For re-authentication to bypass cache. """ auth_plugin = self.auth_plugin if not auth_plugin.is_authenticated or force_reauth: auth_plugin.authenticate() self.auth_token = auth_plugin.auth_token management_url = auth_plugin.management_url if management_url and self.configure_via_auth: self.configure_from_url(management_url) @handle_unauthenticated def do_request(self, method, action, body=None, headers=None, params=None): """ Make a request, returning an HTTP response object. :param method: HTTP verb (GET, POST, PUT, etc.) :param action: Requested path to append to self.doc_root :param body: Data to send in the body of the request :param headers: Headers to send with the request :param params: Key/value pairs to use in query string :returns: HTTP response object """ if not self.auth_token: self._authenticate() url = self._construct_url(action, params) # NOTE(ameade): We need to copy these kwargs since they can be altered # in _do_request but we need the originals if handle_unauthenticated # calls this function again. return self._do_request(method=method, url=url, body=copy.deepcopy(body), headers=copy.deepcopy(headers)) def _construct_url(self, action, params=None): """ Create a URL object we can use to pass to _do_request(). """ action = urlparse.quote(action) path = '/'.join([self.doc_root or '', action.lstrip('/')]) scheme = "https" if self.use_ssl else "http" if netutils.is_valid_ipv6(self.host): netloc = "[%s]:%d" % (self.host, self.port) else: netloc = "%s:%d" % (self.host, self.port) if isinstance(params, dict): for (key, value) in list(params.items()): if value is None: del params[key] continue if not isinstance(value, str): value = str(value) params[key] = encodeutils.safe_encode(value) query = urlparse.urlencode(params) else: query = None url = urlparse.ParseResult(scheme, netloc, path, '', query, '') log_msg = _("Constructed URL: %s") LOG.debug(log_msg, url.geturl()) return url @handle_redirects def _do_request(self, method, url, body, headers): """ Connects to the server and issues a request. Handles converting any returned HTTP error status codes to OpenStack/Glance exceptions and closing the server connection. Returns the result data, or raises an appropriate exception. :param method: HTTP method ("GET", "POST", "PUT", etc...) :param url: urlparse.ParsedResult object with URL information :param body: data to send (as string, filelike or iterable), or None (default) :param headers: mapping of key/value pairs to add as headers :note If the body param has a read attribute, and method is either POST or PUT, this method will automatically conduct a chunked-transfer encoding and use the body as a file object or iterable, transferring chunks of data using the connection's send() method. This allows large objects to be transferred efficiently without buffering the entire body in memory. """ if url.query: path = url.path + "?" 
+ url.query else: path = url.path try: connection_type = self.get_connection_type() headers = headers or {} headers.update(osprofiler.web.get_trace_id_headers()) if 'x-auth-token' not in headers and self.auth_token: headers['x-auth-token'] = self.auth_token c = connection_type(url.hostname, url.port, **self.connect_kwargs) def _pushing(method): return method.lower() in ('post', 'put') def _simple(body): return body is None or isinstance(body, bytes) def _filelike(body): return hasattr(body, 'read') def _chunkbody(connection, iter): connection.putheader('Transfer-Encoding', 'chunked') connection.endheaders() for chunk in iter: connection.send('%x\r\n%s\r\n' % (len(chunk), chunk)) connection.send('0\r\n\r\n') # Do a simple request or a chunked request, depending # on whether the body param is file-like or iterable and # the method is PUT or POST # if not _pushing(method) or _simple(body): # Simple request... c.request(method, path, body, headers) elif _filelike(body) or self._iterable(body): c.putrequest(method, path) # According to HTTP/1.1, Content-Length and Transfer-Encoding # conflict. for header, value in headers.items(): if header.lower() != 'content-length': c.putheader(header, str(value)) iter = utils.chunkreadable(body) _chunkbody(c, iter) else: raise TypeError('Unsupported image type: %s' % body.__class__) res = c.getresponse() def _retry(res): return res.getheader('Retry-After') def read_body(res): body = res.read().decode('utf-8') return body status_code = self.get_status_code(res) if status_code in self.OK_RESPONSE_CODES: return res elif status_code in self.REDIRECT_RESPONSE_CODES: raise exception.RedirectException(res.getheader('Location')) elif status_code == http.client.UNAUTHORIZED: raise exception.NotAuthenticated(read_body(res)) elif status_code == http.client.FORBIDDEN: raise exception.Forbidden(read_body(res)) elif status_code == http.client.NOT_FOUND: raise exception.NotFound(read_body(res)) elif status_code == http.client.CONFLICT: raise exception.Duplicate(read_body(res)) elif status_code == http.client.BAD_REQUEST: raise exception.Invalid(read_body(res)) elif status_code == http.client.MULTIPLE_CHOICES: raise exception.MultipleChoices(body=read_body(res)) elif status_code == http.client.REQUEST_ENTITY_TOO_LARGE: raise exception.LimitExceeded(retry=_retry(res), body=read_body(res)) elif status_code == http.client.INTERNAL_SERVER_ERROR: raise exception.ServerError() elif status_code == http.client.SERVICE_UNAVAILABLE: raise exception.ServiceUnavailable(retry=_retry(res)) else: raise exception.UnexpectedStatus(status=status_code, body=read_body(res)) except (socket.error, IOError) as e: raise exception.ClientConnectionError(e) def _iterable(self, body): return isinstance(body, collections.abc.Iterable) def get_status_code(self, response): """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): return response.status_int else: return response.status def _extract_params(self, actual_params, allowed_params): """ Extract a subset of keys from a dictionary. The filters key will also be extracted, and each of its values will be returned as an individual param. 
:param actual_params: dict of keys to filter :param allowed_params: list of keys that 'actual_params' will be reduced to :returns: subset of 'params' dict """ try: # expect 'filters' param to be a dict here result = dict(actual_params.get('filters')) except TypeError: result = {} for allowed_param in allowed_params: if allowed_param in actual_params: result[allowed_param] = actual_params[allowed_param] return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/config.py0000664000175000017500000007017000000000000017423 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routines for configuring Glance """ import logging import os from oslo_config import cfg from oslo_middleware import cors from oslo_policy import opts from oslo_policy import policy from paste import deploy from glance.i18n import _ from glance.version import version_info as version paste_deploy_opts = [ cfg.StrOpt('flavor', sample_default='keystone', help=_(""" Deployment flavor to use in the server application pipeline. Provide a string value representing the appropriate deployment flavor used in the server application pipeline. This is typically the partial name of a pipeline in the paste configuration file with the service name removed. For example, if your paste section name in the paste configuration file is [pipeline:glance-api-keystone], set ``flavor`` to ``keystone``. Possible values: * String value representing a partial pipeline name. Related Options: * config_file """)), cfg.StrOpt('config_file', sample_default='glance-api-paste.ini', help=_(""" Name of the paste configuration file. Provide a string value representing the name of the paste configuration file to use for configuring pipelines for server application deployments. NOTES: * Provide the name or the path relative to the glance directory for the paste configuration file and not the absolute path. * The sample paste configuration file shipped with Glance need not be edited in most cases as it comes with ready-made pipelines for all common deployment flavors. If no value is specified for this option, the ``paste.ini`` file with the prefix of the corresponding Glance service's configuration file name will be searched for in the known configuration directories. (For example, if this option is missing from or has no value set in ``glance-api.conf``, the service will look for a file named ``glance-api-paste.ini``.) If the paste configuration file is not found, the service will not start. Possible values: * A string value representing the name of the paste configuration file. 
Related Options: * flavor """)), ] image_format_opts = [ cfg.ListOpt('container_formats', default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker', 'compressed'], help=_("Supported values for the 'container_format' " "image attribute"), deprecated_opts=[cfg.DeprecatedOpt('container_formats', group='DEFAULT')]), cfg.ListOpt('disk_formats', default=['ami', 'ari', 'aki', 'vhd', 'vhdx', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'ploop'], help=_("Supported values for the 'disk_format' " "image attribute"), deprecated_opts=[cfg.DeprecatedOpt('disk_formats', group='DEFAULT')]), cfg.ListOpt('vmdk_allowed_types', default=['streamOptimized', 'monolithicSparse'], help=_("A list of strings describing the VMDK 'create-type' " "subformats that will be allowed. This is recommended to " "only include single-file-with-sparse-header variants to " "avoid potential host file exposure due to processing " "named extents. If this list is empty, then no VMDK image " "types are allowed. Note that this is currently only " "checked during image conversion (if enabled), and " "limits the types of VMDK images we will convert " "from.")), ] task_opts = [ cfg.IntOpt('task_time_to_live', default=48, help=_("Time in hours for which a task lives after either " "succeeding or failing"), deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live', group='DEFAULT')]), cfg.StrOpt('task_executor', default='taskflow', help=_(""" Task executor to be used to run task scripts. Provide a string value representing the executor to use for task executions. By default, the ``TaskFlow`` executor is used. ``TaskFlow`` helps make task executions easy, consistent, scalable and reliable. It also enables creation of lightweight task objects and/or functions that are combined together into flows in a declarative manner. Possible values: * taskflow Related Options: * None """)), cfg.StrOpt('work_dir', sample_default='/work_dir', help=_(""" Absolute path to the work directory to use for asynchronous task operations. The directory set here will be used to operate over images - normally before they are imported in the destination store. NOTE: When providing a value for ``work_dir``, please make sure that enough space is provided for concurrent tasks to run efficiently without running out of space. A rough estimation can be done by multiplying the number of ``max_workers`` with an average image size (e.g. 500MB). The image size estimation should be done based on the average size in your deployment. Note that depending on the tasks running you may need to multiply this number by some factor depending on what the task does. For example, you may want to double the available size if image conversion is enabled. All this being said, remember these are just estimations and you should do them based on the worst case scenario and be prepared to act in case they were wrong. Possible values: * String value representing the absolute path to the working directory Related Options: * None """)), ] common_opts = [ cfg.StrOpt('hashing_algorithm', default='sha512', help=_(""" Secure hashing algorithm used for computing the 'os_hash_value' property. This option configures the Glance "multihash", which consists of two image properties: the 'os_hash_algo' and the 'os_hash_value'. The 'os_hash_algo' will be populated by the value of this configuration option, and the 'os_hash_value' will be populated by the hexdigest computed when the algorithm is applied to the uploaded or imported image data.
The value must be a valid secure hash algorithm name recognized by the Python 'hashlib' library. You can determine what these are by examining the 'hashlib.algorithms_available' data member of the version of the library being used in your Glance installation. For interoperability purposes, however, we recommend that you use the set of secure hash names supplied by the 'hashlib.algorithms_guaranteed' data member because those algorithms are guaranteed to be supported by the 'hashlib' library on all platforms. Thus, any image consumer using 'hashlib' locally should be able to verify the 'os_hash_value' of the image. The default value of 'sha512' is a performant secure hash algorithm. If this option is misconfigured, any attempts to store image data will fail. For that reason, we recommend using the default value. Possible values: * Any secure hash algorithm name recognized by the Python 'hashlib' library Related options: * None """)), cfg.IntOpt('image_member_quota', default=128, help=_(""" Maximum number of image members per image. This limits the maximum number of users an image can be shared with. Any negative value is interpreted as unlimited. Related options: * None """)), cfg.IntOpt('image_property_quota', default=128, help=_(""" Maximum number of properties allowed on an image. This enforces an upper limit on the number of additional properties an image can have. Any negative value is interpreted as unlimited. """)), cfg.IntOpt('image_tag_quota', default=128, help=_(""" Maximum number of tags allowed on an image. Any negative value is interpreted as unlimited. Related options: * None """)), cfg.IntOpt('image_location_quota', default=10, help=_(""" Maximum number of locations allowed on an image. Any negative value is interpreted as unlimited. Related options: * None """)), cfg.IntOpt('limit_param_default', default=25, min=1, help=_(""" The default number of results to return for a request. Responses to certain API requests, like list images, may return multiple items. The number of results returned can be explicitly controlled by specifying the ``limit`` parameter in the API request. However, if a ``limit`` parameter is not specified, this configuration value will be used as the default number of results to be returned for any API request. NOTES: * The value of this configuration option may not be greater than the value specified by ``api_limit_max``. * Setting this to a very large value may slow down database queries and increase response times. Setting this to a very low value may result in poor user experience. Possible values: * Any positive integer Related options: * api_limit_max """)), cfg.IntOpt('api_limit_max', default=1000, min=1, help=_(""" Maximum number of results that could be returned by a request. As described in the help text of ``limit_param_default``, some requests may return multiple results. The number of results to be returned is governed either by the ``limit`` parameter in the request or the ``limit_param_default`` configuration option. The value in either case can't be greater than the absolute maximum defined by this configuration option. Anything greater than this value is trimmed down to the maximum value defined here. NOTE: Setting this to a very large value may slow down database queries and increase response times. Setting this to a very low value may result in poor user experience.
Possible values: * Any positive integer Related options: * limit_param_default """)), cfg.BoolOpt('show_image_direct_url', default=False, help=_(""" Show direct image location when returning an image. This configuration option indicates whether to show the direct image location when returning image details to the user. The direct image location is where the image data is stored in backend storage. This image location is shown under the image property ``direct_url``. When multiple image locations exist for an image, the best location is displayed based on the store weightage assigned for each store indicated by the configuration option ``weight``. NOTES: * Revealing image locations can present a GRAVE SECURITY RISK as image locations can sometimes include credentials. Hence, this is set to ``False`` by default. Set this to ``True`` with EXTREME CAUTION and ONLY IF you know what you are doing! * If an operator wishes to avoid showing any image location(s) to the user, then both this option and ``show_multiple_locations`` MUST be set to ``False``. Possible values: * True * False Related options: * show_multiple_locations * weight """)), # NOTE(flaper87): The policy.yaml file should be updated and the location # related rules set to admin only once this option is finally removed. # NOTE(rosmaita): Unfortunately, this option is used to gate some code # paths; if the location related policies are set admin-only, then no # normal users can save or retrieve image data. cfg.BoolOpt('show_multiple_locations', default=False, deprecated_for_removal=True, deprecated_reason=_('Use of this option, deprecated since ' 'Newton, is a security risk and will be ' 'removed once we figure out a way to ' 'satisfy those use cases that currently ' 'require it. An earlier announcement ' 'that the same functionality can be ' 'achieved with greater granularity by ' 'using policies is incorrect. You cannot ' 'work around this option via policy ' 'configuration at the present time, ' 'though that is the direction we believe ' 'the fix will take. Please keep an eye ' 'on the Glance release notes to stay up ' 'to date on progress in addressing this ' 'issue.'), deprecated_since='Newton', help=_(""" Show all image locations when returning an image. This configuration option indicates whether to show all the image locations when returning image details to the user. When multiple image locations exist for an image, the locations are ordered based on the store weightage assigned for each store indicated by the configuration option ``weight``. The image locations are shown under the image property ``locations``. NOTES: * Revealing image locations can present a GRAVE SECURITY RISK as image locations can sometimes include credentials. Hence, this is set to ``False`` by default. Set this to ``True`` with EXTREME CAUTION and ONLY IF you know what you are doing! * See https://wiki.openstack.org/wiki/OSSN/OSSN-0065 for more information. * If an operator wishes to avoid showing any image location(s) to the user, then both this option and ``show_image_direct_url`` MUST be set to ``False``. Possible values: * True * False Related options: * show_image_direct_url * weight """)), cfg.BoolOpt('do_secure_hash', default=True, help=_(""" Calculate hash and checksum for the image. This configuration option indicates that /v2/images/{image_id}/locations POST API will calculate hash and checksum of the image on the fly. If False it will silently ignore the hash and checksum calculation. 
Possible values: * True * False """)), cfg.IntOpt('http_retries', default=3, help=_(""" The number of times to retry when any operation fails. """)), cfg.IntOpt('image_size_cap', default=1099511627776, min=1, max=9223372036854775808, help=_(""" Maximum size of image a user can upload in bytes. An image upload greater than the size mentioned here would result in an image creation failure. This configuration option defaults to 1099511627776 bytes (1 TiB). NOTES: * This value should only be increased after careful consideration and must be set less than or equal to 8 EiB (9223372036854775808). * This value must be set with careful consideration of the backend storage capacity. Setting this to a very low value may result in a large number of image failures, while setting it to a very large value may result in faster consumption of storage. Hence, this must be set according to the nature of images created and storage capacity available. Possible values: * Any positive number less than or equal to 9223372036854775808 """)), cfg.StrOpt('user_storage_quota', default='0', help=_(""" Maximum amount of image storage per tenant. This enforces an upper limit on the cumulative storage consumed by all images of a tenant across all stores. This is a per-tenant limit. The default unit for this configuration option is bytes. However, storage units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``, ``GB`` and ``TB`` representing bytes, kilobytes, megabytes, gigabytes and terabytes respectively. Note that there should not be any space between the value and unit. Value ``0`` signifies no quota enforcement. Negative values are invalid and result in errors. This has no effect if ``use_keystone_limits`` is enabled. Possible values: * A string that is a valid concatenation of a non-negative integer representing the storage value and an optional string literal representing storage units as mentioned above. Related options: * use_keystone_limits """)), cfg.BoolOpt('use_keystone_limits', default=False, help=_(""" Utilize per-tenant resource limits registered in Keystone. Enabling this feature will cause Glance to retrieve limits set in keystone for resource consumption and enforce them against API users. Before turning this on, the limits need to be registered in Keystone, or all quotas will be considered to be zero, and thus all new resource requests will be rejected. These per-tenant resource limits are independent from the static global ones configured in this config file. If this is enabled, the relevant static global limits will be ignored. """)), cfg.HostAddressOpt('pydev_worker_debug_host', sample_default='localhost', help=_(""" Host address of the pydev server. Provide a string value representing the hostname or IP of the pydev server to use for debugging. The pydev server listens for debug connections on this address, facilitating remote debugging in Glance. Possible values: * Valid hostname * Valid IP address Related options: * None """)), cfg.PortOpt('pydev_worker_debug_port', default=5678, help=_(""" Port number that the pydev server will listen on. Provide a port number to bind the pydev server to. The pydev process accepts debug connections on this port and facilitates remote debugging in Glance.
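For example, to have Glance workers attach to a pydev debug server running on another host, one might set (values illustrative):

    pydev_worker_debug_host = 192.0.2.10
    pydev_worker_debug_port = 5678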
Possible values: * A valid port number Related options: * None """)), cfg.StrOpt('metadata_encryption_key', deprecated_for_removal=True, deprecated_since="Dalmatian", deprecated_reason=_(""" This option does not serve the purpose of encrypting location metadata; it encrypts the location URL only for specific APIs. Also, enabling it during an upgrade may disrupt existing deployments, as no DB upgrade script is provided to encrypt existing location URLs. Moreover, its encryption of location URLs is applied inconsistently, which results in download failures. """), secret=True, help=_(""" AES key for encrypting store location metadata. Provide a string value representing the AES cipher to use for encrypting Glance store metadata. NOTE: The AES key to use must be set to a random string of length 16, 24 or 32 bytes. Possible values: * String value representing a valid AES key Related options: * None """)), cfg.StrOpt('digest_algorithm', default='sha256', deprecated_for_removal=True, deprecated_since="Dalmatian", deprecated_reason=_(""" This option has had no effect since the removal of native SSL support. """), help=_(""" Digest algorithm to use for digital signature. Provide a string value representing the digest algorithm to use for generating digital signatures. By default, ``sha256`` is used. To get a list of the available algorithms supported by the version of OpenSSL on your platform, run the command: ``openssl list-message-digest-algorithms``. Examples are 'sha1', 'sha256', and 'sha512'. NOTE: ``digest_algorithm`` is not related to Glance's image signing and verification. It is only used to sign the universally unique identifier (UUID) as a part of the certificate file and key file validation. Possible values: * An OpenSSL message digest algorithm identifier Related options: * None """)), cfg.StrOpt('node_staging_uri', default='file:///tmp/staging/', help=_(""" The URL that provides the location where temporary data will be stored. This option is for Glance internal use only. Glance will save the image data uploaded by the user to the 'staging' endpoint during the image import process. This option does not change the 'staging' API endpoint by any means. NOTE: Using the same path as [task]/work_dir is discouraged. NOTE: 'file://' is the only scheme the api_image_import flow supports for now. NOTE: The staging path must be on a shared filesystem available to all Glance API nodes. Possible values: * String starting with 'file://' followed by an absolute FS path Related options: * [task]/work_dir """)), cfg.ListOpt('enabled_import_methods', item_type=cfg.types.String(quotes=True), bounds=True, default=['glance-direct', 'web-download', 'copy-image'], help=_(""" List of enabled Image Import Methods 'glance-direct', 'copy-image' and 'web-download' are enabled by default. 'glance-download' is available, but requires federated deployments. Related options: * [DEFAULT]/node_staging_uri""")), cfg.StrOpt('worker_self_reference_url', default=None, help=_(""" The URL to this worker. If this is set, other glance workers will know how to contact this one directly if needed. For image import, a single worker stages the image and other workers need to be able to proxy the import request to the right one. If unset, this will be considered to be `public_endpoint`, which normally would be set to the same value on all workers, effectively disabling the proxying behavior.
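For example, a worker that its peers can reach at a dedicated internal address might set (value illustrative):

    worker_self_reference_url = http://glance-worker1.internal:9292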
Possible values: * A URL by which this worker is reachable from other workers Related options: * public_endpoint """)), ] wsgi_opts = [ cfg.IntOpt('task_pool_threads', default=16, min=1, help=_(""" The number of threads (per worker process) in the pool for processing asynchronous tasks. This controls how many asynchronous tasks (i.e. for interoperable image import) each worker can run at a time. If this is too large, you *may* see an increased memory footprint per worker and/or you may overwhelm other system resources such as disk or outbound network bandwidth. If this is too small, image import requests will have to wait until a thread becomes available to begin processing.""")), cfg.StrOpt('python_interpreter', default=None, help=_(""" Path to the python interpreter to use when spawning external processes. If left unspecified, this will be sys.executable, which should be the same interpreter running Glance itself. However, in some situations (for example, uwsgi) sys.executable may not actually point to a python interpreter and an alternative value must be set.""")), ] CONF = cfg.CONF CONF.register_opts(paste_deploy_opts, group='paste_deploy') CONF.register_opts(image_format_opts, group='image_format') CONF.register_opts(task_opts, group='task') CONF.register_opts(common_opts) CONF.register_opts(wsgi_opts, group='wsgi') policy.Enforcer(CONF) def parse_args(args=None, usage=None, default_config_files=None): CONF(args=args, project='glance', version=version.cached_version_string(), usage=usage, default_config_files=default_config_files) def parse_cache_args(args=None): config_files = cfg.find_config_files(project='glance', prog='glance-api') # NOTE(abhishekk): Read the glance-api file first and the glance-cache file # later so that, if the glance-cache file has different values set for some # cache-related options, those values take precedence. config_files.extend(cfg.find_config_files(project='glance', prog='glance-cache')) parse_args(args=args, default_config_files=config_files) def _get_deployment_flavor(flavor=None): """ Retrieve the paste_deploy.flavor config item, formatted appropriately for appending to the application name. :param flavor: if specified, use this setting rather than the paste_deploy.flavor configuration setting """ if not flavor: flavor = CONF.paste_deploy.flavor return '' if not flavor else ('-' + flavor) def _get_paste_config_path(): paste_suffix = '-paste.ini' conf_suffix = '.conf' if CONF.config_file: # Assume paste config is in a paste.ini file corresponding # to the last config file path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) else: path = CONF.prog + paste_suffix return CONF.find_file(os.path.basename(path)) def _get_deployment_config_file(): """ Retrieve the deployment_config_file config item, formatted as an absolute pathname. """ path = CONF.paste_deploy.config_file if not path: path = _get_paste_config_path() if not path or not (os.path.isfile(os.path.abspath(path))): msg = _("Unable to locate paste config file for %s.") % CONF.prog raise RuntimeError(msg) return os.path.abspath(path) def load_paste_app(app_name, flavor=None, conf_file=None): """ Builds and returns a WSGI app from a paste config file. We assume the last config file specified in the supplied ConfigOpts object is the paste config file, if conf_file is None.
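For example (illustrative), the API server would obtain its WSGI application with ``load_paste_app('glance-api')``; passing ``flavor='keystone'`` would instead load the ``glance-api-keystone`` pipeline from the paste config.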
:param app_name: name of the application to load :param flavor: name of the variant of the application to load :param conf_file: path to the paste config file :raises RuntimeError: when config file cannot be located or application cannot be loaded from config file """ # append the deployment flavor to the application name, # in order to identify the appropriate paste pipeline app_name += _get_deployment_flavor(flavor) if not conf_file: conf_file = _get_deployment_config_file() try: logger = logging.getLogger(__name__) logger.debug("Loading %(app_name)s from %(conf_file)s", {'conf_file': conf_file, 'app_name': app_name}) app = deploy.loadapp("config:%s" % conf_file, name=app_name) # Log the options used when starting if we're in debug mode... if CONF.debug: CONF.log_opt_values(logger, logging.DEBUG) return app except (LookupError, ImportError) as e: msg = (_("Unable to load %(app_name)s from " "configuration file %(conf_file)s." "\nGot: %(e)r") % {'app_name': app_name, 'conf_file': conf_file, 'e': e}) logger.error(msg) raise RuntimeError(msg) def set_config_defaults(): """This method updates all configuration default values.""" set_cors_middleware_defaults() # TODO(gmann): Remove setting the default value of config policy_file # once oslo_policy change the default value to 'policy.yaml'. # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 DEFAULT_POLICY_FILE = 'policy.yaml' opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE) def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['Content-MD5', 'X-Image-Meta-Checksum', 'X-Storage-Token', 'Accept-Encoding', 'X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], expose_headers=['X-Image-Meta-Checksum', 'X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/crypt.py0000664000175000017500000000621000000000000017311 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Routines for URL-safe encrypting/decrypting """ import base64 import os import random from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers import Cipher from cryptography.hazmat.primitives.ciphers import modes from oslo_utils import encodeutils def urlsafe_encrypt(key, plaintext, blocksize=16): """ Encrypts plaintext. Resulting ciphertext will contain URL-safe characters. If plaintext is Unicode, encode it to UTF-8 before encryption. 
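Example round trip (illustrative; the key must be 16, 24 or 32 bytes long)::

    >>> key = '0123456789abcdef'
    >>> ciphertext = urlsafe_encrypt(key, 'swift+https://user:pass@example.com/container/obj')
    >>> urlsafe_decrypt(key, ciphertext)
    'swift+https://user:pass@example.com/container/obj'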
:param key: AES secret key :param plaintext: Input text to be encrypted :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16) :returns: Resulting ciphertext """ def pad(text): """ Pads text to be encrypted """ pad_length = (blocksize - len(text) % blocksize) # NOTE(rosmaita): I know this looks stupid, but we can't just # use os.urandom() to get the bytes because we use char(0) as # a delimiter pad = b''.join(bytes((random.SystemRandom().randint(1, 0xFF),)) for i in range(pad_length - 1)) # We use chr(0) as a delimiter between text and padding return text + b'\0' + pad plaintext = encodeutils.to_utf8(plaintext) key = encodeutils.to_utf8(key) # random initial 16 bytes for CBC init_vector = os.urandom(16) backend = default_backend() cypher = Cipher(algorithms.AES(key), modes.CBC(init_vector), backend=backend) encryptor = cypher.encryptor() padded = encryptor.update(pad(plaintext)) + encryptor.finalize() encoded = base64.urlsafe_b64encode(init_vector + padded) encoded = encoded.decode('ascii') return encoded def urlsafe_decrypt(key, ciphertext): """ Decrypts URL-safe base64 encoded ciphertext. On Python 3, the result is decoded from UTF-8. :param key: AES secret key :param ciphertext: The encrypted text to decrypt :returns: Resulting plaintext """ # Cast from unicode ciphertext = encodeutils.to_utf8(ciphertext) key = encodeutils.to_utf8(key) ciphertext = base64.urlsafe_b64decode(ciphertext) backend = default_backend() cypher = Cipher(algorithms.AES(key), modes.CBC(ciphertext[:16]), backend=backend) decryptor = cypher.decryptor() padded = decryptor.update(ciphertext[16:]) + decryptor.finalize() text = padded[:padded.rfind(b'\0')] text = text.decode('utf-8') return text ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/exception.py0000664000175000017500000003426000000000000020154 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Glance exception subclasses""" import urllib.parse as urlparse from glance.i18n import _ _FATAL_EXCEPTION_FORMAT_ERRORS = False class RedirectException(Exception): def __init__(self, url): self.url = urlparse.urlparse(url) class GlanceException(Exception): """ Base Glance Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred") def __init__(self, message=None, *args, **kwargs): if not message: message = self.message try: if kwargs: message = message % kwargs except Exception: if _FATAL_EXCEPTION_FORMAT_ERRORS: raise else: # at least get the core message out if something happened pass self.msg = message super(GlanceException, self).__init__(message) class MissingCredentialError(GlanceException): message = _("Missing required credential: %(required)s") class BadAuthStrategy(GlanceException): message = _("Incorrect auth strategy, expected \"%(expected)s\" but " "received \"%(received)s\"") class NotFound(GlanceException): message = _("An object with the specified identifier was not found.") class BadStoreUri(GlanceException): message = _("The Store URI was malformed.") class Duplicate(GlanceException): message = _("An object with the same identifier already exists.") class Conflict(GlanceException): message = _("An object with the same identifier is currently being " "operated on.") class StorageQuotaFull(GlanceException): message = _("The size of the data %(image_size)s will exceed the limit. " "%(remaining)s bytes remaining.") class AuthBadRequest(GlanceException): message = _("Connect error/bad request to Auth service at URL %(url)s.") class AuthUrlNotFound(GlanceException): message = _("Auth service at URL %(url)s not found.") class AuthorizationFailure(GlanceException): message = _("Authorization failed.") class NotAuthenticated(GlanceException): message = _("You are not authenticated.") class UploadException(GlanceException): message = _('Image upload problem: %s') class Forbidden(GlanceException): message = _("You are not authorized to complete %(action)s action.") class ForbiddenPublicImage(Forbidden): message = _("You are not authorized to complete this action.") class ProtectedImageDelete(Forbidden): message = _("Image %(image_id)s is protected and cannot be deleted.") class ProtectedMetadefNamespaceDelete(Forbidden): message = _("Metadata definition namespace %(namespace)s is protected" " and cannot be deleted.") class ProtectedMetadefNamespacePropDelete(Forbidden): message = _("Metadata definition property %(property_name)s is protected" " and cannot be deleted.") class ProtectedMetadefObjectDelete(Forbidden): message = _("Metadata definition object %(object_name)s is protected" " and cannot be deleted.") class ProtectedMetadefResourceTypeAssociationDelete(Forbidden): message = _("Metadata definition resource-type-association" " %(resource_type)s is protected and cannot be deleted.") class ProtectedMetadefResourceTypeSystemDelete(Forbidden): message = _("Metadata definition resource-type %(resource_type_name)s is" " a seeded-system type and cannot be deleted.") class ProtectedMetadefTagDelete(Forbidden): message = _("Metadata definition tag %(tag_name)s is protected" " and cannot be deleted.") class Invalid(GlanceException): message = _("Data supplied was not valid.") class InvalidSortKey(Invalid): message = _("Sort key supplied was not valid.") class InvalidSortDir(Invalid): message = _("Sort direction supplied was not valid.") class InvalidPropertyProtectionConfiguration(Invalid): message = _("Invalid configuration in property protection file.") class InvalidSwiftStoreConfiguration(Invalid): message = _("Invalid configuration in glance-swift conf file.") class InvalidFilterOperatorValue(Invalid): message = _("Unable to filter using the specified operator.") class InvalidFilterRangeValue(Invalid): message = _("Unable to filter using the specified range.") 
class InvalidOptionValue(Invalid): message = _("Invalid value for option %(option)s: %(value)s") class ReadonlyProperty(Forbidden): message = _("Attribute '%(property)s' is read-only.") class ReservedProperty(Forbidden): message = _("Attribute '%(property)s' is reserved.") class AuthorizationRedirect(GlanceException): message = _("Redirecting to %(uri)s for authorization.") class ClientConnectionError(GlanceException): message = _("There was an error connecting to a server") class ClientConfigurationError(GlanceException): message = _("There was an error configuring the client.") class MultipleChoices(GlanceException): message = _("The request returned a 302 Multiple Choices. This generally " "means that you have not included a version indicator in a " "request URI.\n\nThe body of response returned:\n%(body)s") class LimitExceeded(GlanceException): message = _("The request returned a 413 Request Entity Too Large. This " "generally means that rate limiting or a quota threshold was " "breached.\n\nThe response body:\n%(body)s") def __init__(self, *args, **kwargs): self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') else None) super(LimitExceeded, self).__init__(*args, **kwargs) class ServiceUnavailable(GlanceException): message = _("The request returned 503 Service Unavailable. This " "generally occurs on service overload or other transient " "outage.") def __init__(self, *args, **kwargs): self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') else None) super(ServiceUnavailable, self).__init__(*args, **kwargs) class ServerError(GlanceException): message = _("The request returned 500 Internal Server Error.") class UnexpectedStatus(GlanceException): message = _("The request returned an unexpected status: %(status)s." "\n\nThe response body:\n%(body)s") class InvalidContentType(GlanceException): message = _("Invalid content type %(content_type)s") class BadRegistryConnectionConfiguration(GlanceException): message = _("Registry was not configured correctly on API server. " "Reason: %(reason)s") class BadDriverConfiguration(GlanceException): message = _("Driver %(driver_name)s could not be configured correctly. " "Reason: %(reason)s") class MaxRedirectsExceeded(GlanceException): message = _("Maximum redirects (%(redirects)s) was exceeded.") class InvalidRedirect(GlanceException): message = _("Received invalid HTTP redirect.") class NoServiceEndpoint(GlanceException): message = _("Response from Keystone does not contain a Glance endpoint.") class RegionAmbiguity(GlanceException): message = _("Multiple 'image' service matches for region %(region)s. This " "generally means that a region is required and you have not " "supplied one.") class WorkerCreationFailure(GlanceException): message = _("Server worker creation failed: %(reason)s.") class SchemaLoadError(GlanceException): message = _("Unable to load schema: %(reason)s") class InvalidObject(GlanceException): message = _("Provided object does not match schema " "'%(schema)s': %(reason)s") class ImageSizeLimitExceeded(GlanceException): message = _("The provided image is too large.") class FailedToGetScrubberJobs(GlanceException): message = _("Scrubber encountered an error while trying to fetch " "scrub jobs.") class ImageMemberLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "members for this image. 
Attempted: %(attempted)s, " "Maximum: %(maximum)s") class ImagePropertyLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "properties. Attempted: %(attempted)s, Maximum: %(maximum)s") class ImageTagLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "tags. Attempted: %(attempted)s, Maximum: %(maximum)s") class ImageLocationLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "locations. Attempted: %(attempted)s, Maximum: %(maximum)s") class SIGHUPInterrupt(GlanceException): message = _("System SIGHUP signal received.") class RPCError(GlanceException): message = _("%(cls)s exception was raised in the last rpc call: %(val)s") class TaskException(GlanceException): message = _("An unknown task exception occurred") class BadTaskConfiguration(GlanceException): message = _("Task was not configured properly") class ImageNotFound(NotFound): message = _("Image with the given id %(image_id)s was not found") class TaskNotFound(TaskException, NotFound): message = _("Task with the given id %(task_id)s was not found") class InvalidTaskStatus(TaskException, Invalid): message = _("Provided status of task is unsupported: %(status)s") class InvalidTaskType(TaskException, Invalid): message = _("Provided type of task is unsupported: %(type)s") class InvalidTaskStatusTransition(TaskException, Invalid): message = _("Status transition from %(cur_status)s to" " %(new_status)s is not allowed") class ImportTaskError(TaskException, Invalid): message = _("An import task exception occurred") class TaskAbortedError(ImportTaskError): message = _("Task was aborted externally") class DuplicateLocation(Duplicate): message = _("The location %(location)s already exists") class InvalidParameterValue(Invalid): message = _("Invalid value '%(value)s' for parameter '%(param)s': " "%(extra_msg)s") class InvalidImageStatusTransition(Invalid): message = _("Image status transition from %(cur_status)s to" " %(new_status)s is not allowed") class MetadefDuplicateNamespace(Duplicate): message = _("The metadata definition namespace=%(namespace_name)s" " already exists.") class MetadefDuplicateObject(Duplicate): message = _("A metadata definition object with name=%(object_name)s" " already exists in namespace=%(namespace_name)s.") class MetadefDuplicateProperty(Duplicate): message = _("A metadata definition property with name=%(property_name)s" " already exists in namespace=%(namespace_name)s.") class MetadefDuplicateResourceType(Duplicate): message = _("A metadata definition resource-type with" " name=%(resource_type_name)s already exists.") class MetadefDuplicateResourceTypeAssociation(Duplicate): message = _("The metadata definition resource-type association of" " resource-type=%(resource_type_name)s to" " namespace=%(namespace_name)s" " already exists.") class MetadefDuplicateTag(Duplicate): message = _("A metadata tag with name=%(name)s" " already exists in namespace=%(namespace_name)s." " (Please note that metadata tag names are" " case insensitive).") class MetadefForbidden(Forbidden): message = _("You are not authorized to complete this action.") class MetadefIntegrityError(Forbidden): message = _("The metadata definition %(record_type)s with" " name=%(record_name)s not deleted." 
" Other records still refer to it.") class MetadefNamespaceNotFound(NotFound): message = _("Metadata definition namespace=%(namespace_name)s" " was not found.") class MetadefObjectNotFound(NotFound): message = _("The metadata definition object with" " name=%(object_name)s was not found in" " namespace=%(namespace_name)s.") class MetadefPropertyNotFound(NotFound): message = _("The metadata definition property with" " name=%(property_name)s was not found in" " namespace=%(namespace_name)s.") class MetadefResourceTypeNotFound(NotFound): message = _("The metadata definition resource-type with" " name=%(resource_type_name)s, was not found.") class MetadefResourceTypeAssociationNotFound(NotFound): message = _("The metadata definition resource-type association of" " resource-type=%(resource_type_name)s to" " namespace=%(namespace_name)s," " was not found.") class MetadefTagNotFound(NotFound): message = _("The metadata definition tag with" " name=%(name)s was not found in" " namespace=%(namespace_name)s.") class InvalidDataMigrationScript(GlanceException): message = _("Invalid data migration script '%(script)s'. A valid data " "migration script must implement functions 'has_migrations' " "and 'migrate'.") class GlanceEndpointNotFound(NotFound): message = _("%(interface)s glance endpoint not " "found for region %(region)s") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/format_inspector.py0000775000175000017500000011402200000000000021532 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is a python implementation of virtual disk format inspection routines gathered from various public specification documents, as well as qemu disk driver code. It attempts to store and parse the minimum amount of data required, and in a streaming-friendly manner to collect metadata about complex-format images. """ import struct from oslo_log import log as logging from oslo_utils import units LOG = logging.getLogger(__name__) def chunked_reader(fileobj, chunk_size=512): while True: chunk = fileobj.read(chunk_size) if not chunk: break yield chunk class CaptureRegion(object): """Represents a region of a file we want to capture. A region of a file we want to capture requires a byte offset into the file and a length. This is expected to be used by a data processing loop, calling capture() with the most recently-read chunk. This class handles the task of grabbing the desired region of data across potentially multiple fractional and unaligned reads. :param offset: Byte offset into the file starting the region :param length: The length of the region """ def __init__(self, offset, length): self.offset = offset self.length = length self.data = b'' @property def complete(self): """Returns True when we have captured the desired data.""" return self.length == len(self.data) def capture(self, chunk, current_position): """Process a chunk of data. 
This should be called for each chunk in the read loop, at least until complete returns True. :param chunk: A chunk of bytes in the file :param current_position: The position of the file processed by the read loop so far. Note that this will be the position in the file *after* the chunk being presented. """ read_start = current_position - len(chunk) if (read_start <= self.offset <= current_position or self.offset <= read_start <= (self.offset + self.length)): if read_start < self.offset: lead_gap = self.offset - read_start else: lead_gap = 0 self.data += chunk[lead_gap:] self.data = self.data[:self.length] class ImageFormatError(Exception): """An unrecoverable image format error that aborts the process.""" pass class TraceDisabled(object): """A logger-like thing that swallows tracing when we do not want it.""" def debug(self, *a, **k): pass info = debug warning = debug error = debug class FileInspector(object): """A stream-based disk image inspector. This base class works on raw images and is subclassed for more complex types. It is to be presented with the file to be examined one chunk at a time, during read processing and will only store as much data as necessary to determine required attributes of the file. """ def __init__(self, tracing=False): self._total_count = 0 # NOTE(danms): The logging in here is extremely verbose for a reason, # but should never really be enabled at that level at runtime. To # retain all that work and assist in future debug, we have a separate # debug flag that can be passed from a manual tool to turn it on. if tracing: self._log = logging.getLogger(str(self)) else: self._log = TraceDisabled() self._capture_regions = {} def _capture(self, chunk, only=None): for name, region in self._capture_regions.items(): if only and name not in only: continue if not region.complete: region.capture(chunk, self._total_count) def eat_chunk(self, chunk): """Call this to present chunks of the file to the inspector.""" pre_regions = set(self._capture_regions.keys()) # Increment our position-in-file counter self._total_count += len(chunk) # Run through the regions we know of to see if they want this # data self._capture(chunk) # Let the format do some post-read processing of the stream self.post_process() # Check to see if the post-read processing added new regions # which may require the current chunk. new_regions = set(self._capture_regions.keys()) - pre_regions if new_regions: self._capture(chunk, only=new_regions) def post_process(self): """Post-read hook to process what has been read so far. This will be called after each chunk is read and potentially captured by the defined regions. If any regions are defined by this call, those regions will be presented with the current chunk in case it is within one of the new regions. 
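(For example, the VHDX inspector below uses this hook to add its 'metadata' region as soon as the 'header' region is complete, and then a 'vds' region for the virtual disk size once the metadata region is complete.)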
""" pass def region(self, name): """Get a CaptureRegion by name.""" return self._capture_regions[name] def new_region(self, name, region): """Add a new CaptureRegion by name.""" if self.has_region(name): # This is a bug, we tried to add the same region twice raise ImageFormatError('Inspector re-added region %s' % name) self._capture_regions[name] = region def has_region(self, name): """Returns True if named region has been defined.""" return name in self._capture_regions @property def format_match(self): """Returns True if the file appears to be the expected format.""" return True @property def virtual_size(self): """Returns the virtual size of the disk image, or zero if unknown.""" return self._total_count @property def actual_size(self): """Returns the total size of the file, usually smaller than virtual_size. NOTE: this will only be accurate if the entire file is read and processed. """ return self._total_count @property def complete(self): """Returns True if we have all the information needed.""" return all(r.complete for r in self._capture_regions.values()) def __str__(self): """The string name of this file format.""" return 'raw' @property def context_info(self): """Return info on amount of data held in memory for auditing. This is a dict of region:sizeinbytes items that the inspector uses to examine the file. """ return {name: len(region.data) for name, region in self._capture_regions.items()} @classmethod def from_file(cls, filename): """Read as much of a file as necessary to complete inspection. NOTE: Because we only read as much of the file as necessary, the actual_size property will not reflect the size of the file, but the amount of data we read before we satisfied the inspector. Raises ImageFormatError if we cannot parse the file. """ inspector = cls() with open(filename, 'rb') as f: for chunk in chunked_reader(f): inspector.eat_chunk(chunk) if inspector.complete: # No need to eat any more data break if not inspector.complete or not inspector.format_match: raise ImageFormatError('File is not in requested format') return inspector def safety_check(self): """Perform some checks to determine if this file is safe. Returns True if safe, False otherwise. It may raise ImageFormatError if safety cannot be guaranteed because of parsing or other errors. """ return True # The qcow2 format consists of a big-endian 72-byte header, of which # only a small portion has information we care about: # # Dec Hex Name # 0 0x00 Magic 4-bytes 'QFI\xfb' # 4 0x04 Version (uint32_t, should always be 2 for modern files) # . . . # 8 0x08 Backing file offset (uint64_t) # 24 0x18 Size in bytes (unint64_t) # . . . # 72 0x48 Incompatible features bitfield (6 bytes) # # https://gitlab.com/qemu-project/qemu/-/blob/master/docs/interop/qcow2.txt class QcowInspector(FileInspector): """QEMU QCOW2 Format This should only require about 32 bytes of the beginning of the file to determine the virtual size, and 104 bytes to perform the safety check. 
""" BF_OFFSET = 0x08 BF_OFFSET_LEN = 8 I_FEATURES = 0x48 I_FEATURES_LEN = 8 I_FEATURES_DATAFILE_BIT = 3 I_FEATURES_MAX_BIT = 4 def __init__(self, *a, **k): super(QcowInspector, self).__init__(*a, **k) self.new_region('header', CaptureRegion(0, 512)) def _qcow_header_data(self): magic, version, bf_offset, bf_sz, cluster_bits, size = ( struct.unpack('>4sIQIIQ', self.region('header').data[:32])) return magic, size @property def has_header(self): return self.region('header').complete @property def virtual_size(self): if not self.region('header').complete: return 0 if not self.format_match: return 0 magic, size = self._qcow_header_data() return size @property def format_match(self): if not self.region('header').complete: return False magic, size = self._qcow_header_data() return magic == b'QFI\xFB' @property def has_backing_file(self): if not self.region('header').complete: return None if not self.format_match: return False bf_offset_bytes = self.region('header').data[ self.BF_OFFSET:self.BF_OFFSET + self.BF_OFFSET_LEN] # nonzero means "has a backing file" bf_offset, = struct.unpack('>Q', bf_offset_bytes) return bf_offset != 0 @property def has_unknown_features(self): if not self.region('header').complete: return None if not self.format_match: return False i_features = self.region('header').data[ self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN] # This is the maximum byte number we should expect any bits to be set max_byte = self.I_FEATURES_MAX_BIT // 8 # The flag bytes are in big-endian ordering, so if we process # them in index-order, they're reversed for i, byte_num in enumerate(reversed(range(self.I_FEATURES_LEN))): if byte_num == max_byte: # If we're in the max-allowed byte, allow any bits less than # the maximum-known feature flag bit to be set allow_mask = ((1 << self.I_FEATURES_MAX_BIT) - 1) elif byte_num > max_byte: # If we're above the byte with the maximum known feature flag # bit, then we expect all zeroes allow_mask = 0x0 else: # Any earlier-than-the-maximum byte can have any of the flag # bits set allow_mask = 0xFF if i_features[i] & ~allow_mask: LOG.warning('Found unknown feature bit in byte %i: %s/%s', byte_num, bin(i_features[byte_num] & ~allow_mask), bin(allow_mask)) return True return False @property def has_data_file(self): if not self.region('header').complete: return None if not self.format_match: return False i_features = self.region('header').data[ self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN] # First byte of bitfield, which is i_features[7] byte = self.I_FEATURES_LEN - 1 - self.I_FEATURES_DATAFILE_BIT // 8 # Third bit of bitfield, which is 0x04 bit = 1 << (self.I_FEATURES_DATAFILE_BIT - 1 % 8) return bool(i_features[byte] & bit) def __str__(self): return 'qcow2' def safety_check(self): return (not self.has_backing_file and not self.has_data_file and not self.has_unknown_features) class QEDInspector(FileInspector): def __init__(self, tracing=False): super().__init__(tracing) self.new_region('header', CaptureRegion(0, 512)) @property def format_match(self): if not self.region('header').complete: return False return self.region('header').data.startswith(b'QED\x00') def safety_check(self): # QED format is not supported by anyone, but we want to detect it # and mark it as just always unsafe. 
return False # The VHD (or VPC as QEMU calls it) format consists of a big-endian # 512-byte "footer" at the beginning of the file with various # information, most of which does not matter to us: # # Dec Hex Name # 0 0x00 Magic string (8-bytes, always 'conectix') # 40 0x28 Disk size (uint64_t) # # https://github.com/qemu/qemu/blob/master/block/vpc.c class VHDInspector(FileInspector): """Connectix/MS VPC VHD Format This should only require about 512 bytes of the beginning of the file to determine the virtual size. """ def __init__(self, *a, **k): super(VHDInspector, self).__init__(*a, **k) self.new_region('header', CaptureRegion(0, 512)) @property def format_match(self): return self.region('header').data.startswith(b'conectix') @property def virtual_size(self): if not self.region('header').complete: return 0 if not self.format_match: return 0 return struct.unpack('>Q', self.region('header').data[40:48])[0] def __str__(self): return 'vhd' # The VHDX format consists of a complex dynamic little-endian # structure with multiple regions of metadata and data, linked by # offsets within the file (and within regions), identified by MSFT # GUID strings. The header is a 320KiB structure, only a few pieces of # which we actually need to capture and interpret: # # Dec Hex Name # 0 0x00000 Identity (Technically 9-bytes, padded to 64KiB, the first # 8 bytes of which are 'vhdxfile') # 196608 0x30000 The Region table (64KiB of a 32-byte header, followed # by up to 2047 32-byte region table entry structures) # # The region table header includes two items we need to read and parse, # which are: # # 196608 0x30000 4-byte signature ('regi') # 196616 0x30008 Entry count (uint32_t) # # The region table entries follow the region table header immediately # and are identified by a 16-byte GUID, and provide an offset of the # start of that region. We care about the "metadata region", identified # by the METAREGION class variable. The region table entry is (offsets # from the beginning of the entry, since it could be in multiple places): # # 0 0x00000 16-byte MSFT GUID # 16 0x00010 Offset of the actual metadata region (uint64_t) # # When we find the METAREGION table entry, we need to grab that offset # and start examining the region structure at that point. That # consists of a metadata table of structures, which point to places in # the data in an unstructured space that follows. The header is # (offsets relative to the region start): # # 0 0x00000 8-byte signature ('metadata') # . . . # 16 0x00010 2-byte entry count (up to 2047 entries max) # # This header is followed by the specified number of metadata entry # structures, identified by GUID: # # 0 0x00000 16-byte MSFT GUID # 16 0x00010 4-byte offset (uint32_t, relative to the beginning of # the metadata region) # # We need to find the "Virtual Disk Size" metadata item, identified by # the GUID in the VIRTUAL_DISK_SIZE class variable, grab the offset, # add it to the offset of the metadata region, and examine that 8-byte # chunk of data that follows. # # The "Virtual Disk Size" is a naked uint64_t which contains the size # of the virtual disk, and is our ultimate target here. # # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-vhdx/83e061f8-f6e2-4de1-91bd-5d518a43d477 class VHDXInspector(FileInspector): """MS VHDX Format This requires some complex parsing of the stream.
The first 256KiB of the image is stored to get the header and region information, and then we capture the first metadata region to read those records, find the location of the virtual size data and parse it. This needs to store the metadata table entries up until the VDS record, which may consist of up to 2047 32-byte entries at max. Finally, it must store a chunk of data at the offset of the actual VDS uint64. """ METAREGION = '8B7CA206-4790-4B9A-B8FE-575F050F886E' VIRTUAL_DISK_SIZE = '2FA54224-CD1B-4876-B211-5DBED83BF4B8' VHDX_METADATA_TABLE_MAX_SIZE = 32 * 2048 # From qemu def __init__(self, *a, **k): super(VHDXInspector, self).__init__(*a, **k) self.new_region('ident', CaptureRegion(0, 32)) self.new_region('header', CaptureRegion(192 * 1024, 64 * 1024)) def post_process(self): # After reading a chunk, we may have the following conditions: # # 1. We may have just completed the header region, and if so, # we need to immediately read and calculate the location of # the metadata region, as it may be starting in the same # read we just did. # 2. We may have just completed the metadata region, and if so, # we need to immediately calculate the location of the # "virtual disk size" record, as it may be starting in the # same read we just did. if self.region('header').complete and not self.has_region('metadata'): region = self._find_meta_region() if region: self.new_region('metadata', region) elif self.has_region('metadata') and not self.has_region('vds'): region = self._find_meta_entry(self.VIRTUAL_DISK_SIZE) if region: self.new_region('vds', region) @property def format_match(self): return self.region('ident').data.startswith(b'vhdxfile') @staticmethod def _guid(buf): """Format a MSFT GUID from the 16-byte input buffer.""" guid_format = '<IHH8B' return '%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X' % ( struct.unpack(guid_format, buf)) def _find_meta_region(self): # The region table entries start after a 16-byte table header region_entry_first = 192 * 1024 + 16 # Parse the region table header to find the number of regions regi, cksum, count, reserved = struct.unpack( '<IIII', self.region('header').data[:16]) if regi != 0x69676572: raise ImageFormatError('Region signature not found at %x' % ( self.region('header').offset)) if count >= 2048: raise ImageFormatError('Region count is %i (limit 2047)' % count) # Process the regions until we find the metadata one; grab the # offset and return self._log.debug('Region entry first is %x', region_entry_first) self._log.debug('Region entries %i', count) meta_offset = 0 for i in range(0, count): entry_start = region_entry_first + (i * 32) entry_end = entry_start + 32 entry = self.region('header').data[entry_start:entry_end] self._log.debug('Entry offset is %x', entry_start) # GUID is the first 16 bytes guid = self._guid(entry[:16]) if guid == self.METAREGION: # This entry is the metadata region entry meta_offset, meta_len, meta_req = struct.unpack( '<QII', entry[16:32]) self._log.debug('Meta entry %i specifies offset %x', i, meta_offset) # The meta_offset and meta_len are the offset and length of # the metadata region, which is what we need to capture next return CaptureRegion(meta_offset, meta_len) self._log.debug('Did not find metadata region') return None def _find_meta_entry(self, desired_guid): meta_buffer = self.region('metadata').data if len(meta_buffer) < 32: # Not enough data yet for full header, wait for more return None # Make sure we found the metadata region by checking the signature sig, reserved, count = struct.unpack( '<8sHH', meta_buffer[:12]) if sig != b'metadata': raise ImageFormatError( 'Invalid signature for metadata region: %r' % sig) entries_size = 32 + (count * 32) if len(meta_buffer) < entries_size: # Not enough data yet for all metadata entries, wait for more return None if count >= 2048: raise ImageFormatError( 'Metadata item count is %i (limit 2047)' % count) for i in range(0, count): entry_offset = 32 + (i * 32) guid = self._guid(meta_buffer[entry_offset:entry_offset + 16]) if guid == desired_guid: # Found the item we are looking for by id. # Stop our region from capturing item_offset, item_length, _reserved = struct.unpack( '<III', meta_buffer[entry_offset + 16:entry_offset + 28]) item_length = min(item_length, self.VHDX_METADATA_TABLE_MAX_SIZE) self.region('metadata').length = len(meta_buffer) self._log.debug('Found entry at offset %x', item_offset) # Metadata item offset is from the beginning of the metadata # region, not the file return CaptureRegion( self.region('metadata').offset + item_offset, item_length) self._log.debug('Did not find guid %s', desired_guid) return None @property def virtual_size(self): # Until we have found the region and entry, we don't know the virtual # size (and don't want to capture the whole file) if not self.has_region('metadata') or not self.has_region('vds'): return 0 size, = struct.unpack('<Q', self.region('vds').data) return size def __str__(self): return 'vhdx' def detect_file_format(filename): """Attempts to detect the format of a file.""" inspectors = {k: v() for k, v in ALL_FORMATS.items()} with open(filename, 'rb') as f: for chunk in chunked_reader(f): for format, inspector in list(inspectors.items()): try: inspector.eat_chunk(chunk) except ImageFormatError: # No match, so stop considering this format inspectors.pop(format) continue if (inspector.format_match and inspector.complete and format != 'raw'): # First complete match (other than raw) wins break if all(i.complete for i in inspectors.values()): break detections = [inspector for format, inspector in inspectors.items() if inspector.complete and inspector.format_match and format != 'raw'] if len(detections) > 1: all_formats = [str(inspector) for inspector in detections] raise ImageFormatError( 'Multiple formats detected: %s' % ', '.join(all_formats)) return inspectors['raw'] if not detections else detections[0] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/property_utils.py0000664000175000017500000002257200000000000021265 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import configparser import re from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy import glance.api.policy from glance.common import exception from glance.i18n import _, _LE, _LW CONFIG = configparser.ConfigParser() LOG = logging.getLogger(__name__) property_opts = [ cfg.StrOpt('property_protection_file', help=_(""" The location of the property protection file. Provide a valid path to the property protection file which contains the rules for property protections and the roles/policies associated with them. A property protection file, when set, restricts the Glance image properties to be created, read, updated and/or deleted by a specific set of users that are identified by either roles or policies. If this configuration option is not set, by default, property protections won't be enforced. If a value is specified and the file is not found, the glance-api service will fail to start. More information on property protections can be found at: https://docs.openstack.org/glance/latest/admin/property-protections.html Possible values: * Empty string * Valid path to the property protection configuration file Related options: * property_protection_rule_format """)), cfg.StrOpt('property_protection_rule_format', default='roles', choices=('roles', 'policies'), help=_(""" Rule format for property protection. Provide the desired way to set property protection on Glance image properties. The two permissible values are ``roles`` and ``policies``. The default value is ``roles``. If the value is ``roles``, the property protection file must contain a comma separated list of user roles indicating permissions for each of the CRUD operations on each property being protected. If set to ``policies``, a policy defined in policy.yaml is used to express property protections for each of the CRUD operations. 
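For example, a property protection file in ``roles`` format might look like this (illustrative):

    [^x_billing_code_.*]
    create = admin
    read = admin,billing
    update = admin
    delete = admin

This grants the CRUD permissions for any property matching the regular expression ``^x_billing_code_.*`` to the listed roles.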
Examples of how property protections are enforced based on ``roles`` or ``policies`` can be found at: https://docs.openstack.org/glance/latest/admin/property-protections.html#examples Possible values: * roles * policies Related options: * property_protection_file """)), ] CONF = cfg.CONF CONF.register_opts(property_opts) # NOTE (spredzy): Due to the particularly lengthy name of the exception # and the number of occurrences it is raised in this file, a variable is # created InvalidPropProtectConf = exception.InvalidPropertyProtectionConfiguration def is_property_protection_enabled(): if CONF.property_protection_file: return True return False class PropertyRules(object): def __init__(self, policy_enforcer=None): self.rules = [] self.prop_exp_mapping = {} self.policies = [] self.policy_enforcer = policy_enforcer or glance.api.policy.Enforcer() self.prop_prot_rule_format = CONF.property_protection_rule_format self.prop_prot_rule_format = self.prop_prot_rule_format.lower() self._load_rules() def _load_rules(self): try: conf_file = CONF.find_file(CONF.property_protection_file) CONFIG.read(conf_file) except Exception as e: msg = (_LE("Couldn't find property protection file %(file)s: " "%(error)s.") % {'file': CONF.property_protection_file, 'error': e}) LOG.error(msg) raise InvalidPropProtectConf() if self.prop_prot_rule_format not in ['policies', 'roles']: msg = _LE("Invalid value '%s' for " "'property_protection_rule_format'. " "The permitted values are " "'roles' and 'policies'") % self.prop_prot_rule_format LOG.error(msg) raise InvalidPropProtectConf() operations = ['create', 'read', 'update', 'delete'] properties = CONFIG.sections() for property_exp in properties: property_dict = {} compiled_rule = self._compile_rule(property_exp) for operation in operations: try: permissions = CONFIG.get(property_exp, operation) except configparser.NoOptionError: raise InvalidPropProtectConf() if permissions: if self.prop_prot_rule_format == 'policies': if ',' in permissions: LOG.error( _LE("Multiple policies '%s' not allowed " "for a given operation. Policies can be " "combined in the policy file"), permissions) raise InvalidPropProtectConf() self.prop_exp_mapping[compiled_rule] = property_exp self._add_policy_rules(property_exp, operation, permissions) permissions = [permissions] else: permissions = [permission.strip() for permission in permissions.split(',')] if '@' in permissions and '!' in permissions: msg = (_LE( "Malformed property protection rule in " "[%(prop)s] %(op)s=%(perm)s: '@' and '!' " "are mutually exclusive") % dict(prop=property_exp, op=operation, perm=permissions)) LOG.error(msg) raise InvalidPropProtectConf() property_dict[operation] = permissions else: property_dict[operation] = [] LOG.warning( _LW('Property protection on operation %(operation)s' ' for rule %(rule)s is not found. No role will be' ' allowed to perform this operation.'), {'operation': operation, 'rule': property_exp}) self.rules.append((compiled_rule, property_dict)) def _compile_rule(self, rule): try: return re.compile(rule) except Exception as e: msg = (_LE("Encountered a malformed property protection rule" " %(rule)s: %(error)s.") % {'rule': rule, 'error': e}) LOG.error(msg) raise InvalidPropProtectConf() def _add_policy_rules(self, property_exp, action, rule): """Add policy rules to the policy enforcer.
For example, if the file listed as property_protection_file has: [prop_a] create = glance_creator then the corresponding policy rule would be: "prop_a:create": "rule:glance_creator" where glance_creator is defined in policy.yaml. For example: "glance_creator": "role:admin or role:glance_create_user" """ rule = "rule:%s" % rule rule_name = "%s:%s" % (property_exp, action) rule_dict = policy.Rules.from_dict({ rule_name: rule }) self.policy_enforcer.add_rules(rule_dict) def _check_policy(self, property_exp, action, context): try: action = ":".join([property_exp, action]) self.policy_enforcer.enforce(context, action, {}, registered=False) except exception.Forbidden: return False return True def check_property_rules(self, property_name, action, context): roles = context.roles # Include service roles to check if an action can be # performed on the property or not if context.service_roles: roles.extend(context.service_roles) if not self.rules: return True if action not in ['create', 'read', 'update', 'delete']: return False for rule_exp, rule in self.rules: if rule_exp.search(str(property_name)): break else: # no matching rules return False rule_roles = rule.get(action) if rule_roles: if '!' in rule_roles: return False elif '@' in rule_roles: return True if self.prop_prot_rule_format == 'policies': prop_exp_key = self.prop_exp_mapping[rule_exp] return self._check_policy(prop_exp_key, action, context) if set(roles).intersection(set([role.lower() for role in rule_roles])): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/removed_config.py0000664000175000017500000000251200000000000021137 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.i18n import _ from oslo_config import cfg removed_opts = [ cfg.BoolOpt('owner_is_tenant', default=True, help=_(""" This option has been removed in Wallaby. Because there is no migration path for installations that had owner_is_tenant==False, we have defined this option so that the code can probe the config file and refuse to start the api service if the deployment has been using that setting. """)), ] def register_removed_options(): # NOTE(cyril): This should only be called when we need to use options that # have been removed and are therefore no longer relevant. This is the case # of upgrade checks, for instance. cfg.CONF.register_opts(removed_opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.858303 glance-29.0.0/glance/common/scripts/0000775000175000017500000000000000000000000017266 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/scripts/__init__.py0000664000175000017500000000460700000000000021406 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from glance.common.scripts.api_image_import import main as api_image_import from glance.common.scripts.image_import import main as image_import from glance.i18n import _LE, _LI LOG = logging.getLogger(__name__) def run_task(task_id, task_type, context, task_repo=None, image_repo=None, image_factory=None): # TODO(nikhil): if task_repo is None get new task repo # TODO(nikhil): if image_repo is None get new image repo # TODO(nikhil): if image_factory is None get new image factory LOG.info(_LI("Loading known task scripts for task_id %(task_id)s " "of type %(task_type)s"), {'task_id': task_id, 'task_type': task_type}) if task_type == 'import': image_import.run(task_id, context, task_repo, image_repo, image_factory) elif task_type == 'api_image_import': api_image_import.run(task_id, context, task_repo, image_repo, image_factory) else: msg = _LE("This task type %(task_type)s is not supported by the " "current deployment of Glance. Please refer to the " "documentation provided by OpenStack or your operator " "for more information.") % {'task_type': task_type} LOG.error(msg) task = task_repo.get(task_id) task.fail(msg) if task_repo: task_repo.save(task) else: LOG.error(_LE("Failed to save task %(task_id)s in DB as task_repo " "is %(task_repo)s"), {"task_id": task_id, "task_repo": task_repo}) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.858303 glance-29.0.0/glance/common/scripts/api_image_import/0000775000175000017500000000000000000000000022573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/scripts/api_image_import/__init__.py0000664000175000017500000000000000000000000024672 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/scripts/api_image_import/main.py0000664000175000017500000001246000000000000024074 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
__all__ = [ 'run', ] from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from glance.api.v2 import images as v2_api from glance.common import exception from glance.common.scripts import utils as script_utils from glance.common import store_utils from glance.i18n import _ LOG = logging.getLogger(__name__) def run(t_id, context, task_repo, image_repo, image_factory): LOG.info('Task %(task_id)s beginning image import ' 'execution.', {'task_id': t_id}) _execute(t_id, task_repo, image_repo, image_factory) # NOTE(nikhil): This lock prevents more than N threads from being spawned # simultaneously. The number N represents the number of threads in the # executor pool. The value is set to 10 in the eventlet executor. @lockutils.synchronized("glance_image_import") def _execute(t_id, task_repo, image_repo, image_factory): task = script_utils.get_task(task_repo, t_id) if task is None: # NOTE: This happens if the task is not found in the database. In # such cases, there is no way to update the task status, so # it's ignored here. return try: task_input = script_utils.unpack_task_input(task) image_id = task_input.get('image_id') task.succeed({'image_id': image_id}) except Exception as e: # Note: The message string contains Error in it to indicate # in the task.message that it's an error message for the user. # TODO(nikhil): need to bring back save_and_reraise_exception when # necessary err_msg = ("Error: " + str(type(e)) + ': ' + encodeutils.exception_to_unicode(e)) log_msg = err_msg + (" Task ID %s" % task.task_id) LOG.exception(log_msg) task.fail(_(err_msg)) # noqa finally: task_repo.save(task) def import_image(image_repo, image_factory, task_input, task_id, uri): original_image = v2_api.create_image(image_repo, image_factory, task_input.get('image_properties'), task_id) # NOTE: set image status to saving just before setting data original_image.status = 'saving' image_repo.save(original_image) image_id = original_image.image_id # NOTE: Retrieving image from the database because the Image object # returned from the create_image method does not have appropriate factories # wrapped around it. new_image = image_repo.get(image_id) set_image_data(new_image, uri, task_id) try: # NOTE: Check that the Image has not been deleted after setting the data # and before saving the active image. Here if the image status is # saving, then new_image is saved as it contains updated location, # size, virtual_size and checksum information and the status of # new_image is already set to active in the set_image_data() call.
image = image_repo.get(image_id) if image.status == 'saving': image_repo.save(new_image) return image_id else: msg = _("The Image %(image_id)s object being created by this task " "%(task_id)s is no longer in a valid status for further " "processing.") % {"image_id": image_id, "task_id": task_id} raise exception.Conflict(msg) except (exception.Conflict, exception.NotFound, exception.NotAuthenticated): with excutils.save_and_reraise_exception(): if new_image.locations: for location in new_image.locations: store_utils.delete_image_location_from_backend( new_image.context, image_id, location) def set_image_data(image, uri, task_id, backend=None): data_iter = None try: LOG.info("Task %(task_id)s: Got image data uri %(data_uri)s to be " "imported", {"data_uri": uri, "task_id": task_id}) data_iter = script_utils.get_image_data_iter(uri) image.set_data(data_iter, backend=backend) except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning("Task %(task_id)s failed with exception %(error)s", {"error": encodeutils.exception_to_unicode(e), "task_id": task_id}) LOG.info("Task %(task_id)s: Could not import image file" " %(image_data)s", {"image_data": uri, "task_id": task_id}) finally: if hasattr(data_iter, 'close'): data_iter.close() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.858303 glance-29.0.0/glance/common/scripts/image_import/0000775000175000017500000000000000000000000021742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/scripts/image_import/__init__.py0000664000175000017500000000000000000000000024041 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/scripts/image_import/main.py0000664000175000017500000001503400000000000023243 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'run', ] from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from glance.api.v2 import images as v2_api from glance.common import exception from glance.common.scripts import utils as script_utils from glance.common import store_utils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) def run(t_id, context, task_repo, image_repo, image_factory): LOG.info(_LI('Task %(task_id)s beginning import ' 'execution.'), {'task_id': t_id}) _execute(t_id, task_repo, image_repo, image_factory) # NOTE(nikhil): This lock prevents more than N threads from being spawned # simultaneously. The number N represents the number of threads in the # executor pool. The value is set to 10 in the eventlet executor.
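# --- Editor's sketch (illustrative, not part of the Glance source): the
# serialization the decorator below relies on. oslo.concurrency gives all
# callables sharing a lock name mutual exclusion within the process, so
# only one _execute() body runs at a time per worker.
from oslo_concurrency import lockutils

@lockutils.synchronized("example_lock")
def serialized_work():
    # only one thread/greenthread holding "example_lock" runs here
    pass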
@lockutils.synchronized("glance_import") def _execute(t_id, task_repo, image_repo, image_factory): task = script_utils.get_task(task_repo, t_id) if task is None: # NOTE: This happens if task is not found in the database. In # such cases, there is no way to update the task status so, # it's ignored here. return try: task_input = script_utils.unpack_task_input(task) uri = script_utils.validate_location_uri(task_input.get('import_from')) image_id = import_image(image_repo, image_factory, task_input, t_id, uri) task.succeed({'image_id': image_id}) except Exception as e: # Note: The message string contains Error in it to indicate # in the task.message that it's a error message for the user. # TODO(nikhil): need to bring back save_and_reraise_exception when # necessary err_msg = ("Error: " + str(type(e)) + ': ' + encodeutils.exception_to_unicode(e)) log_msg = _LE(err_msg + ("Task ID %s" % task.task_id)) # noqa LOG.exception(log_msg) task.fail(_LE(err_msg)) # noqa finally: task_repo.save(task) def import_image(image_repo, image_factory, task_input, task_id, uri): original_image = create_image(image_repo, image_factory, task_input.get('image_properties'), task_id) # NOTE: set image status to saving just before setting data original_image.status = 'saving' image_repo.save(original_image) image_id = original_image.image_id # NOTE: Retrieving image from the database because the Image object # returned from create_image method does not have appropriate factories # wrapped around it. new_image = image_repo.get(image_id) set_image_data(new_image, uri, task_id) try: # NOTE: Check if the Image is not deleted after setting the data # before saving the active image. Here if image status is # saving, then new_image is saved as it contains updated location, # size, virtual_size and checksum information and the status of # new_image is already set to active in set_image_data() call. image = image_repo.get(image_id) if image.status == 'saving': image_repo.save(new_image) return image_id else: msg = _("The Image %(image_id)s object being created by this task " "%(task_id)s, is no longer in valid status for further " "processing.") % {"image_id": image_id, "task_id": task_id} raise exception.Conflict(msg) except (exception.Conflict, exception.NotFound, exception.NotAuthenticated): with excutils.save_and_reraise_exception(): if new_image.locations: for location in new_image.locations: store_utils.delete_image_location_from_backend( new_image.context, image_id, location) def create_image(image_repo, image_factory, image_properties, task_id): properties = {} # NOTE: get the base properties for key in v2_api.get_base_properties(): try: properties[key] = image_properties.pop(key) except KeyError: LOG.debug("Task ID %(task_id)s: Ignoring property %(k)s for " "setting base properties while creating " "Image.", {'task_id': task_id, 'k': key}) # NOTE: get the rest of the properties and pass them as # extra_properties for Image to be created with them. 
properties['extra_properties'] = image_properties script_utils.set_base_image_properties(properties=properties) image = image_factory.new_image(**properties) image_repo.add(image) return image def set_image_data(image, uri, task_id, backend=None, set_active=True, callback=None): data_iter = None try: LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be " "imported"), {"data_uri": uri, "task_id": task_id}) data_iter = script_utils.get_image_data_iter(uri) if callback: # If a callback was provided, wrap our data iterator to call # the function every 60 seconds. data_iter = script_utils.CallbackIterator( data_iter, callback, min_interval=60) image.set_data(data_iter, backend=backend, set_active=set_active) except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning(_LW("Task %(task_id)s failed with exception " "%(error)s"), {"error": encodeutils.exception_to_unicode(e), "task_id": task_id}) LOG.info(_LI("Task %(task_id)s: Could not import image file" " %(image_data)s"), {"image_data": uri, "task_id": task_id}) finally: if hasattr(data_iter, 'close'): data_iter.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/scripts/utils.py0000664000175000017500000002116300000000000021003 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'get_task', 'unpack_task_input', 'set_base_image_properties', 'validate_location_uri', 'get_image_data_iter', ] import urllib from oslo_log import log as logging from oslo_utils import timeutils from glance.common import exception from glance.i18n import _, _LE LOG = logging.getLogger(__name__) def get_task(task_repo, task_id): """Gets a TaskProxy object. :param task_repo: TaskRepo object used to perform DB operations :param task_id: ID of the Task """ task = None try: task = task_repo.get(task_id) except exception.NotFound: msg = _LE('Task not found for task_id %s') % task_id LOG.exception(msg) return task def unpack_task_input(task): """Verifies and returns valid task input dictionary. 
:param task: Task domain object """ task_type = task.type task_input = task.task_input if task_type == 'api_image_import': if not task_input: msg = _("Input to api_image_import task is empty.") raise exception.Invalid(msg) if 'image_id' not in task_input: msg = _("Missing required 'image_id' field") raise exception.Invalid(msg) elif task_type == 'location_import': if not task_input: msg = _("Input to location_import task is empty.") raise exception.Invalid(msg) for key in ['image_id', 'loc_url', 'validation_data']: if key not in task_input: msg = (_("Input does not contain '%(key)s' field") % {"key": key}) raise exception.Invalid(msg) else: for key in ["import_from", "import_from_format", "image_properties"]: if key not in task_input: msg = (_("Input does not contain '%(key)s' field") % {"key": key}) raise exception.Invalid(msg) return task_input def set_base_image_properties(properties=None): """Sets optional base properties for creating Image. :param properties: Input dict to set some base properties """ if isinstance(properties, dict) and len(properties) == 0: # TODO(nikhil): We can make these properties configurable while # implementing the pipeline logic for the scripts. The below shown # are placeholders to show that the scripts work on 'devstack' # environment. properties['disk_format'] = 'qcow2' properties['container_format'] = 'bare' def validate_location_uri(location): """Validate location uri into acceptable format. :param location: Location uri to be validated """ if not location: raise exception.BadStoreUri(_('Invalid location: %s') % location) elif location.startswith(('http://', 'https://')): return location # NOTE: file type uri is being avoided for security reasons, # see LP bug #942118 #1400966. elif location.startswith(("file:///", "filesystem:///")): msg = _("File based imports are not allowed. Please use a non-local " "source of image data.") # NOTE: raise BadStoreUri and let the encompassing block save the error # msg in the task.message. raise exception.BadStoreUri(msg) else: # TODO(nikhil): add other supported uris supported = ['http', ] msg = _("The given uri is not valid. Please specify a " "valid uri from the following list of supported uri " "%(supported)s") % {'supported': supported} raise urllib.error.URLError(msg) def get_image_data_iter(uri): """Returns iterable object either for local file or uri :param uri: uri (remote or local) to the datasource we want to iterate Validation/sanitization of the uri is expected to happen before we get here. """ # NOTE(flaper87): This is safe because the input uri is already # verified before the task is created. if uri.startswith("file://"): uri = uri.split("file://")[-1] # NOTE(flaper87): The caller of this function expects to have # an iterable object. FileObjects in python are iterable, therefore # we are returning it as is. # The file descriptor will be eventually cleaned up by the garbage # collector once its ref-count is dropped to 0. That is, when there # won't be any references pointing to this file. # # We're not using StringIO or other tools to avoid reading everything # into memory. Some images may be quite heavy. return open(uri, "rb") return urllib.request.urlopen(uri) class CallbackIterator(object): """A proxy iterator that calls a callback function periodically This is used to wrap a reading file object and proxy its chunks through to another caller. Periodically, the callback function will be called with information about the data processed so far, allowing for status updating or cancel flag checking. 
The function can be called every time we process a chunk, or only after we have processed a certain amount of data since the last call. :param source: A source iterator whose content will be proxied through this object. :param callback: A function to be called periodically while iterating. The signature should be fn(chunk_bytes, total_bytes), where chunk is the number of bytes since the last call of the callback, and total_bytes is the total amount copied thus far. :param min_interval: Limit the calls to callback to only when this many seconds have elapsed since the last callback (a close() or final iteration may fire the callback in less time to ensure completion). """ def __init__(self, source, callback, min_interval=None): self._source = source self._callback = callback self._min_interval = min_interval self._chunk_bytes = 0 self._total_bytes = 0 self._timer = None @property def callback_due(self): """Indicates if a callback should be made. If no time-based limit is set, this will always be True. If a limit is set, then this returns True exactly once, resetting the timer when it does. """ if not self._min_interval: return True if not self._timer: self._timer = timeutils.StopWatch(self._min_interval) self._timer.start() if self._timer.expired(): self._timer.restart() return True else: return False def __iter__(self): return self def __next__(self): try: chunk = next(self._source) except StopIteration: # NOTE(danms): Make sure we call the callback the last # time if we have processed data since the last one. self._call_callback(b'', is_last=True) raise self._call_callback(chunk) return chunk def close(self): self._call_callback(b'', is_last=True) if hasattr(self._source, 'close'): return self._source.close() def _call_callback(self, chunk, is_last=False): self._total_bytes += len(chunk) self._chunk_bytes += len(chunk) if not self._chunk_bytes: # NOTE(danms): Never call the callback if we haven't processed # any data since the last time return if is_last or self.callback_due: # FIXME(danms): Perhaps we should only abort the read if # the callback raises a known abort exception, otherwise # log and swallow. Need to figure out what exception # read() callers would be expecting that we could raise # from here. self._callback(self._chunk_bytes, self._total_bytes) self._chunk_bytes = 0 def read(self, size=None): chunk = self._source.read(size) self._call_callback(chunk) return chunk ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/store_utils.py0000664000175000017500000002200300000000000020522 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
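# --- Editor's sketch (illustrative, not part of the Glance source): using
# the CallbackIterator defined above to report copy progress. With
# min_interval=60 the intermediate callback fires at most once a minute;
# a final call is always made when the source is exhausted or closed.
from glance.common.scripts import utils as script_utils

def _progress(chunk_bytes, total_bytes):
    print('copied %d new bytes, %d bytes total' % (chunk_bytes, total_bytes))

source = iter([b'x' * 8192] * 4)
for chunk in script_utils.CallbackIterator(source, _progress,
                                           min_interval=60):
    pass  # consume the proxied chunks; _progress is invoked periodically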
import sys import urllib.parse as urlparse import glance_store as store_api from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import glance.db as db_api from glance.i18n import _LE, _LW from glance import scrubber LOG = logging.getLogger(__name__) CONF = cfg.CONF RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config']) def check_reserved_stores(enabled_stores): for store in enabled_stores: if store.startswith("os_glance_"): return True return False def safe_delete_from_backend(context, image_id, location): """ Given a location, delete an image from the store and update the location status in the db. This function tries to handle all known exceptions which might be raised by the calls it makes on the store and DB modules. :param context: The request context :param image_id: The image identifier :param location: The image location entry """ try: if CONF.enabled_backends: backend = location['metadata'].get('store') ret = store_api.delete(location['url'], backend, context=context) else: ret = store_api.delete_from_backend(location['url'], context=context) location['status'] = 'deleted' if 'id' in location: db_api.get_api().image_location_delete(context, image_id, location['id'], 'deleted') return ret except store_api.NotFound: msg = ("The image data for %(iid)s was not found in the store. " "The image record has been updated to reflect " "this." % {'iid': image_id}) LOG.warning(msg) except store_api.StoreDeleteNotSupported as e: LOG.warning(encodeutils.exception_to_unicode(e)) except store_api.UnsupportedBackend: exc_type = sys.exc_info()[0].__name__ msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') % dict(image_id=image_id, exc=exc_type)) LOG.error(msg) def schedule_delayed_delete_from_backend(context, image_id, location): """ Given a location, schedule the deletion of an image location and update the location status in the db. :param context: The request context :param image_id: The image identifier :param location: The image location entry """ db_queue = scrubber.get_scrub_queue() context = None ret = db_queue.add_location(image_id, location) if ret: location['status'] = 'pending_delete' if 'id' in location: # NOTE(zhiyan): A newly added image location entry has no 'id' # field since it has not been saved to the DB. db_api.get_api().image_location_delete(context, image_id, location['id'], 'pending_delete') else: db_api.get_api().image_location_add(context, image_id, location) return ret def delete_image_location_from_backend(context, image_id, location): """ Given a location, immediately delete an image location or schedule its deletion, and update the location status in the db. :param context: The request context :param image_id: The image identifier :param location: The image location entry """ deleted = False if CONF.delayed_delete: deleted = schedule_delayed_delete_from_backend(context, image_id, location) if not deleted: # NOTE(zhiyan) If image metadata has not been saved to the DB, # for example because the upload process failed, then we can't use # the location status mechanism to support image pending delete. safe_delete_from_backend(context, image_id, location) def validate_external_location(uri): """ Validate whether the URI of an external location is supported. Only non-local store types are OK, i.e. Swift, HTTP. Note the absence of 'file://' for security reasons, see LP bug #942118, 1400966, 'swift+config://' is also absent for security reasons, see LP bug #1334196. :param uri: The URI of the external image location.
:returns: Whether the given URI of the external image location is OK. """ if not uri: return False # TODO(zhiyan): This function could be moved to glance_store. # TODO(gm): Use a whitelist of allowed schemes scheme = urlparse.urlparse(uri).scheme known_schemes = store_api.get_known_schemes() if CONF.enabled_backends: known_schemes = store_api.get_known_schemes_for_multi_store() return (scheme in known_schemes and scheme not in RESTRICTED_URI_SCHEMAS) def _get_store_id_from_uri(uri): scheme = urlparse.urlparse(uri).scheme location_map = store_api.location.SCHEME_TO_CLS_BACKEND_MAP url_matched = False if scheme not in location_map: LOG.warning("Unknown scheme '%(scheme)s' found in uri '%(uri)s'", { 'scheme': scheme, 'uri': uri}) return for store in location_map[scheme]: store_instance = location_map[scheme][store]['store'] url_prefix = store_instance.url_prefix if url_prefix and uri.startswith(url_prefix): url_matched = True break if url_matched: return u"%s" % store else: LOG.warning("Invalid location uri %s", uri) return def update_store_in_locations(context, image, image_repo): store_updated = False for loc in image.locations: if (not loc['metadata'].get( 'store') or loc['metadata'].get( 'store') not in CONF.enabled_backends): if loc['url'].startswith("cinder://"): _update_cinder_location_and_store_id(context, loc) store_id = _get_store_id_from_uri(loc['url']) if store_id: if 'store' in loc['metadata']: old_store = loc['metadata']['store'] if old_store != store_id: LOG.debug("Store '%(old)s' has changed to " "'%(new)s' by the operator; updating " "it in the location of image " "'%(id)s'", {'old': old_store, 'new': store_id, 'id': image.image_id}) store_updated = True loc['metadata']['store'] = store_id if store_updated: image_repo.save(image) def _update_cinder_location_and_store_id(context, loc): """Update store location of legacy images While upgrading from a single cinder store to multiple stores, images having a store configured with a volume type matching the image volume's type will be migrated/associated to that store, and their location url will be updated to the new format, i.e. cinder://store-id/volume-id If there is no store configured for the image, the location url will not be updated.
""" uri = loc['url'] volume_id = loc['url'].split("/")[-1] scheme = urlparse.urlparse(uri).scheme location_map = store_api.location.SCHEME_TO_CLS_BACKEND_MAP if scheme not in location_map: LOG.warning(_LW("Unknown scheme '%(scheme)s' found in uri '%(uri)s'"), {'scheme': scheme, 'uri': uri}) return for store in location_map[scheme]: store_instance = location_map[scheme][store]['store'] if store_instance.is_image_associated_with_store(context, volume_id): url_prefix = store_instance.url_prefix loc['url'] = "%s/%s" % (url_prefix, volume_id) loc['metadata']['store'] = "%s" % store return def get_updated_store_location(locations, context=None): for loc in locations: if loc['url'].startswith("cinder://") and context: _update_cinder_location_and_store_id(context, loc) continue store_id = _get_store_id_from_uri(loc['url']) if store_id: loc['metadata']['store'] = store_id return locations def get_dir_separator(): separator = '' staging_dir = "file://%s" % getattr( CONF, 'os_glance_staging_store').filesystem_store_datadir if not staging_dir.endswith('/'): separator = '/' return separator, staging_dir ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/swift_store_utils.py0000664000175000017500000001136600000000000021750 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import configparser from oslo_config import cfg from oslo_log import log as logging from glance.common import exception from glance.i18n import _, _LE swift_opts = [ cfg.StrOpt('default_swift_reference', default="ref1", help=_(""" Reference to default Swift account/backing store parameters. Provide a string value representing a reference to the default set of parameters required for using swift account/backing store for image storage. The default reference value for this configuration option is 'ref1'. This configuration option dereferences the parameters and facilitates image storage in Swift storage backend every time a new image is added. Possible values: * A valid string value Related options: * None """)), cfg.StrOpt('swift_store_auth_address', deprecated_reason=(""" The option auth_address in the Swift back-end configuration file is used instead. """), help=_('The address where the Swift authentication service ' 'is listening.')), cfg.StrOpt('swift_store_user', secret=True, deprecated_reason=(""" The option 'user' in the Swift back-end configuration file is set instead. """), help=_('The user to authenticate against the Swift ' 'authentication service.')), cfg.StrOpt('swift_store_key', secret=True, deprecated_reason=(""" The option 'key' in the Swift back-end configuration file is used to set the authentication key instead. """), help=_('Auth key for the user authenticating against the ' 'Swift authentication service.')), cfg.StrOpt('swift_store_config_file', secret=True, help=_(""" File containing the swift account(s) configurations. 
Include a string value representing the path to a configuration file that has references for each of the configured Swift account(s)/backing stores. By default, no file path is specified and customized Swift referencing is disabled. Configuring this option is highly recommended while using Swift storage backend for image storage as it helps avoid storage of credentials in the database. Possible values: * None * String value representing a valid configuration file path Related options: * None """)), ] CONFIG = configparser.ConfigParser() LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(swift_opts) def is_multiple_swift_store_accounts_enabled(): if CONF.swift_store_config_file is None: return False return True class SwiftParams(object): def __init__(self): if is_multiple_swift_store_accounts_enabled(): self.params = self._load_config() else: self.params = self._form_default_params() def _form_default_params(self): default = {} if (CONF.swift_store_user and CONF.swift_store_key and CONF.swift_store_auth_address): default['user'] = CONF.swift_store_user default['key'] = CONF.swift_store_key default['auth_address'] = CONF.swift_store_auth_address return {CONF.default_swift_reference: default} return {} def _load_config(self): try: conf_file = CONF.find_file(CONF.swift_store_config_file) CONFIG.read(conf_file) except Exception as e: msg = (_LE("swift config file %(conf_file)s:%(exc)s not found") % {'conf_file': CONF.swift_store_config_file, 'exc': e}) LOG.error(msg) raise exception.InvalidSwiftStoreConfiguration() account_params = {} account_references = CONFIG.sections() for ref in account_references: reference = {} try: reference['auth_address'] = CONFIG.get(ref, 'auth_address') reference['user'] = CONFIG.get(ref, 'user') reference['key'] = CONFIG.get(ref, 'key') account_params[ref] = reference except (ValueError, SyntaxError, configparser.NoOptionError): LOG.exception(_LE("Invalid format of swift store config " "cfg")) return account_params ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/timeutils.py0000664000175000017500000000551200000000000020173 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Time related utilities and helper functions. 
""" import datetime import iso8601 from oslo_utils import encodeutils # ISO 8601 extended time format with microseconds _ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND def isotime(at=None, subsecond=False): """Stringify time in ISO 8601 format.""" if not at: at = utcnow() st = at.strftime(_ISO8601_TIME_FORMAT if not subsecond else _ISO8601_TIME_FORMAT_SUBSECOND) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' # Need to handle either iso8601 or python UTC format st += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz) return st def parse_isotime(timestr): """Parse time from ISO 8601 format.""" try: return iso8601.parse_date(timestr) except iso8601.ParseError as e: raise ValueError(encodeutils.exception_to_unicode(e)) except TypeError as e: raise ValueError(encodeutils.exception_to_unicode(e)) def utcnow(with_timezone=False): """Overridable version of utils.utcnow that can return a TZ-aware datetime. """ if utcnow.override_time: try: return utcnow.override_time.pop(0) except AttributeError: return utcnow.override_time if with_timezone: return datetime.datetime.now(tz=iso8601.iso8601.UTC) return datetime.datetime.utcnow() def normalize_time(timestamp): """Normalize time in arbitrary timezone to UTC naive object.""" offset = timestamp.utcoffset() if offset is None: return timestamp return timestamp.replace(tzinfo=None) - offset def iso8601_from_timestamp(timestamp, microsecond=False): """Returns an iso8601 formatted date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp), microsecond) utcnow.override_time = None def delta_seconds(before, after): """Return the difference between two timing objects. Compute the difference in seconds between two date, time, or datetime objects (as a float, to microsecond resolution). """ delta = after - before return datetime.timedelta.total_seconds(delta) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/trust_auth.py0000664000175000017500000001070600000000000020357 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystoneauth1 import exceptions as ka_exceptions from keystoneauth1 import loading as ka_loading from keystoneclient.v3 import client as ks_client from oslo_config import cfg from oslo_log import log as logging CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('timeout'), group='keystone_authtoken') LOG = logging.getLogger(__name__) class TokenRefresher(object): """Class responsible for refreshing tokens using trusts""" def __init__(self, user_plugin, user_project, user_roles): """Prepare all parameters and clients required to refresh the token""" # step 1: create a trust to ensure that we can always update the token # trustor = user who made the request trustor_client = self._load_client(user_plugin) trustor_id = trustor_client.session.get_user_id() # get a trustee user client that impersonates the main user trustee_user_auth = ka_loading.load_auth_from_conf_options( CONF, 'keystone_authtoken') # save the service user client because we need a new service token # to refresh the trust-scoped client later self.trustee_user_client = self._load_client(trustee_user_auth) trustee_id = self.trustee_user_client.session.get_user_id() self.trust_id = trustor_client.trusts.create(trustor_user=trustor_id, trustee_user=trustee_id, impersonation=True, role_names=user_roles, project=user_project).id LOG.debug("Trust %s has been created.", self.trust_id) # step 2: postpone trust-scoped client initialization # until we need to refresh the token self.trustee_client = None def refresh_token(self): """Receive a new token if the user needs to update the old token :return: new token that can be used for authentication """ LOG.debug("Requesting the new token with trust %s", self.trust_id) if self.trustee_client is None: self.trustee_client = self._refresh_trustee_client() try: return self.trustee_client.session.get_token() except ka_exceptions.Unauthorized: # in case of Unauthorized exceptions, try to refresh the client # because the service user token may have expired self.trustee_client = self._refresh_trustee_client() return self.trustee_client.session.get_token() def release_resources(self): """Release keystone resources required for refreshing""" try: if self.trustee_client is None: self._refresh_trustee_client().trusts.delete(self.trust_id) else: self.trustee_client.trusts.delete(self.trust_id) except ka_exceptions.Unauthorized: # the service user token may expire while we are trying to delete # the token, so we need to update the client to ensure that this # is not the reason for the failure self.trustee_client = self._refresh_trustee_client() self.trustee_client.trusts.delete(self.trust_id) def _refresh_trustee_client(self): # Remove project_name and project_id, since we need a trust scoped # auth object kwargs = { 'project_name': None, 'project_domain_name': None, 'project_id': None, 'trust_id': self.trust_id } trustee_auth = ka_loading.load_auth_from_conf_options( CONF, 'keystone_authtoken', **kwargs) return self._load_client(trustee_auth) @staticmethod def _load_client(plugin): # load the client from auth settings and the user plugin sess = ka_loading.load_session_from_conf_options( CONF, 'keystone_authtoken', auth=plugin) return ks_client.Client(session=sess) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/utils.py0000664000175000017500000006352500000000000017324 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 SoftLayer Technologies, Inc.
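# --- Editor's sketch (illustrative, not part of the Glance source): the
# intended TokenRefresher lifecycle from trust_auth.py above. user_plugin,
# the project id and the role list are hypothetical inputs that would
# normally come from the incoming request context.
from glance.common import trust_auth

def refresh_example(user_plugin, project_id, roles):
    refresher = trust_auth.TokenRefresher(user_plugin, project_id, roles)
    token = refresher.refresh_token()  # trust-scoped token, renewed on 401
    # ... use the token for long-running work, then drop the trust:
    refresher.release_resources()
    return token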
# Copyright 2015 Mirantis, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ import errno try: from eventlet import sleep except ImportError: from time import sleep from eventlet.green import socket import functools import os import re import urllib import glance_store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import strutils from webob import exc from glance.common import exception from glance.common import timeutils from glance.common import wsgi from glance.i18n import _, _LE, _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) # Whitelist of v1 API headers of form x-image-meta-xxx IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', 'x-image-meta-is_public', 'x-image-meta-disk_format', 'x-image-meta-container_format', 'x-image-meta-name', 'x-image-meta-status', 'x-image-meta-copy_from', 'x-image-meta-uri', 'x-image-meta-checksum', 'x-image-meta-created_at', 'x-image-meta-updated_at', 'x-image-meta-deleted_at', 'x-image-meta-min_ram', 'x-image-meta-min_disk', 'x-image-meta-owner', 'x-image-meta-store', 'x-image-meta-id', 'x-image-meta-protected', 'x-image-meta-deleted', 'x-image-meta-virtual_size'] GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD' def chunkreadable(iter, chunk_size=65536): """ Wrap a readable iterator with a reader yielding chunks of a preferred size, otherwise leave iterator unchanged. :param iter: an iter which may also be readable :param chunk_size: maximum size of chunk """ return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter def chunkiter(fp, chunk_size=65536): """ Return an iterator to a file-like obj which yields fixed size chunks :param fp: a file-like object :param chunk_size: maximum size of chunk """ while True: chunk = fp.read(chunk_size) if chunk: yield chunk else: break def cooperative_iter(iter): """ Return an iterator which schedules after each iteration. This can prevent eventlet thread starvation. :param iter: an iterator to wrap """ try: for chunk in iter: sleep(0) yield chunk except Exception as err: with excutils.save_and_reraise_exception(): msg = _LE("Error: cooperative_iter exception %s") % err LOG.error(msg) def cooperative_read(fd): """ Wrap a file descriptor's read with a partial function which schedules after each read. This can prevent eventlet thread starvation. :param fd: a file descriptor to wrap """ def readfn(*args): result = fd.read(*args) sleep(0) return result return readfn MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit CONF.import_group('import_filtering_opts', 'glance.async_.flows._internal_plugins') def validate_import_uri(uri): """Validate requested uri for Image Import web-download. 
:param uri: target uri to be validated """ if not uri: return False parsed_uri = urllib.parse.urlparse(uri) scheme = parsed_uri.scheme host = parsed_uri.hostname port = parsed_uri.port wl_schemes = CONF.import_filtering_opts.allowed_schemes bl_schemes = CONF.import_filtering_opts.disallowed_schemes wl_hosts = CONF.import_filtering_opts.allowed_hosts bl_hosts = CONF.import_filtering_opts.disallowed_hosts wl_ports = CONF.import_filtering_opts.allowed_ports bl_ports = CONF.import_filtering_opts.disallowed_ports # NOTE(jokke): Checking if both allowed and disallowed are defined and # logging it to inform only allowed will be obeyed. if wl_schemes and bl_schemes: bl_schemes = [] LOG.debug("Both allowed and disallowed schemes has been configured. " "Will only process allowed list.") if wl_hosts and bl_hosts: bl_hosts = [] LOG.debug("Both allowed and disallowed hosts has been configured. " "Will only process allowed list.") if wl_ports and bl_ports: bl_ports = [] LOG.debug("Both allowed and disallowed ports has been configured. " "Will only process allowed list.") if not scheme or ((wl_schemes and scheme not in wl_schemes) or parsed_uri.scheme in bl_schemes): return False if not host or ((wl_hosts and host not in wl_hosts) or host in bl_hosts): return False if port and ((wl_ports and port not in wl_ports) or port in bl_ports): return False return True class CooperativeReader(object): """ An eventlet thread friendly class for reading in image data. When accessing data either through the iterator or the read method we perform a sleep to allow a co-operative yield. When there is more than one image being uploaded/downloaded this prevents eventlet thread starvation, ie allows all threads to be scheduled periodically rather than having the same thread be continuously active. """ def __init__(self, fd): """ :param fd: Underlying image file object """ self.fd = fd self.iterator = None # NOTE(markwash): if the underlying supports read(), overwrite the # default iterator-based implementation with cooperative_read which # is more straightforward if hasattr(fd, 'read'): self.read = cooperative_read(fd) else: self.iterator = None self.buffer = b'' self.position = 0 def read(self, length=None): """Return the requested amount of bytes, fetching the next chunk of the underlying iterator when needed. This is replaced with cooperative_read in __init__ if the underlying fd already supports read(). """ if length is None: if len(self.buffer) - self.position > 0: # if no length specified but some data exists in buffer, # return that data and clear the buffer result = self.buffer[self.position:] self.buffer = b'' self.position = 0 return bytes(result) else: # otherwise read the next chunk from the underlying iterator # and return it as a whole. Reset the buffer, as subsequent # calls may specify the length try: if self.iterator is None: self.iterator = self.__iter__() return next(self.iterator) except StopIteration: return b'' finally: self.buffer = b'' self.position = 0 else: result = bytearray() while len(result) < length: if self.position < len(self.buffer): to_read = length - len(result) chunk = self.buffer[self.position:self.position + to_read] result.extend(chunk) # This check is here to prevent potential OOM issues if # this code is called with unreasonably high values of read # size. Currently it is only called from the HTTP clients # of Glance backend stores, which use httplib for data # streaming, which has readsize hardcoded to 8K, so this # check should never fire. 
Regardless it still worths to # make the check, as the code may be reused somewhere else. if len(result) >= MAX_COOP_READER_BUFFER_SIZE: raise exception.LimitExceeded() self.position += len(chunk) else: try: if self.iterator is None: self.iterator = self.__iter__() self.buffer = next(self.iterator) self.position = 0 except StopIteration: self.buffer = b'' self.position = 0 return bytes(result) return bytes(result) def __iter__(self): return cooperative_iter(self.fd.__iter__()) class LimitingReader(object): """ Reader designed to fail when reading image data past the configured allowable amount. """ def __init__(self, data, limit, exception_class=exception.ImageSizeLimitExceeded): """ :param data: Underlying image data object :param limit: maximum number of bytes the reader should allow :param exception_class: Type of exception to be raised """ self.data = data self.limit = limit self.bytes_read = 0 self.exception_class = exception_class def __iter__(self): for chunk in self.data: self.bytes_read += len(chunk) if self.bytes_read > self.limit: raise self.exception_class() else: yield chunk def read(self, i): result = self.data.read(i) self.bytes_read += len(result) if self.bytes_read > self.limit: raise self.exception_class() return result def image_meta_to_http_headers(image_meta): """ Returns a set of image metadata into a dict of HTTP headers that can be fed to either a Webob Request object or an httplib.HTTP(S)Connection object :param image_meta: Mapping of image metadata """ headers = {} for k, v in image_meta.items(): if v is not None: if k == 'properties': for pk, pv in v.items(): if pv is not None: headers["x-image-meta-property-%s" % pk.lower()] = str(pv) else: headers["x-image-meta-%s" % k.lower()] = str(v) return headers def get_image_meta_from_headers(response): """ Processes HTTP headers from a supplied response that match the x-image-meta and x-image-meta-property and returns a mapping of image metadata and properties :param response: Response to process """ result = {} properties = {} if hasattr(response, 'getheaders'): # httplib.HTTPResponse headers = response.getheaders() else: # webob.Response headers = response.headers.items() for key, value in headers: key = str(key.lower()) if key.startswith('x-image-meta-property-'): field_name = key[len('x-image-meta-property-'):].replace('-', '_') properties[field_name] = value or None elif key.startswith('x-image-meta-'): field_name = key[len('x-image-meta-'):].replace('-', '_') if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS: msg = _("Bad header: %(header_name)s") % {'header_name': key} raise exc.HTTPBadRequest(msg, content_type="text/plain") result[field_name] = value or None result['properties'] = properties for key, nullable in [('size', False), ('min_disk', False), ('min_ram', False), ('virtual_size', True)]: if key in result: try: result[key] = int(result[key]) except ValueError: if nullable and result[key] == str(None): result[key] = None else: extra = (_("Cannot convert image %(key)s '%(value)s' " "to an integer.") % {'key': key, 'value': result[key]}) raise exception.InvalidParameterValue(value=result[key], param=key, extra_msg=extra) if result[key] is not None and result[key] < 0: extra = _('Cannot be a negative value.') raise exception.InvalidParameterValue(value=result[key], param=key, extra_msg=extra) for key in ('is_public', 'deleted', 'protected'): if key in result: result[key] = strutils.bool_from_string(result[key]) return result def create_mashup_dict(image_meta): """ Returns a dictionary-like mashup of 
the image core properties and the image custom properties from given image metadata. :param image_meta: metadata of image with core and custom properties """ d = {} for key, value in image_meta.items(): if isinstance(value, dict): for subkey, subvalue in create_mashup_dict(value).items(): if subkey not in image_meta: d[subkey] = subvalue else: d[key] = value return d def safe_mkdirs(path): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def mutating(func): """Decorator to enforce read-only logic""" @functools.wraps(func) def wrapped(self, req, *args, **kwargs): if req.context.read_only: msg = "Read-only access" LOG.debug(msg) raise exc.HTTPForbidden(msg, request=req, content_type="text/plain") return func(self, req, *args, **kwargs) return wrapped def setup_remote_pydev_debug(host, port): error_msg = _LE('Error setting up the debug environment. Verify that the' ' option pydev_worker_debug_host is pointing to a valid ' 'hostname or IP on which a pydev server is listening on' ' the port indicated by pydev_worker_debug_port.') try: try: from pydev import pydevd except ImportError: import pydevd pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True) return True except Exception: with excutils.save_and_reraise_exception(): LOG.exception(error_msg) def get_test_suite_socket(): global GLANCE_TEST_SOCKET_FD_STR if GLANCE_TEST_SOCKET_FD_STR in os.environ: fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR]) sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) sock.listen(CONF.backlog) del os.environ[GLANCE_TEST_SOCKET_FD_STR] os.close(fd) return sock return None def is_valid_hostname(hostname): """Verify whether a hostname (not an FQDN) is valid.""" return re.match('^[a-zA-Z0-9-]+$', hostname) is not None def is_valid_fqdn(fqdn): """Verify whether a host is a valid FQDN.""" return re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None def parse_valid_host_port(host_port): """ Given a "host:port" string, attempts to parse it as intelligently as possible to determine if it is valid. This includes IPv6 [host]:port form, IPv4 ip:port form, and hostname:port or fqdn:port form. Invalid inputs will raise a ValueError, while valid inputs will return a (host, port) tuple where the port will always be of type int. """ try: try: host, port = netutils.parse_host_port(host_port) except Exception: raise ValueError(_('Host and port "%s" is not valid.') % host_port) if not netutils.is_valid_port(port): raise ValueError(_('Port "%s" is not valid.') % port) # First check for valid IPv6 and IPv4 addresses, then a generic # hostname. Failing those, if the host includes a period, then this # should pass a very generic FQDN check. The FQDN check for letters at # the tail end will weed out any hilariously absurd IPv4 addresses. if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or is_valid_hostname(host) or is_valid_fqdn(host)): raise ValueError(_('Host "%s" is not valid.') % host) except Exception as ex: raise ValueError(_('%s ' 'Please specify a host:port pair, where host is an ' 'IPv4 address, IPv6 address, hostname, or FQDN. 
If ' 'using an IPv6 address, enclose it in brackets ' 'separately from the port (i.e., ' '"[fe80::a:b:c]:9876").') % ex) return (host, int(port)) try: REGEX_4BYTE_UNICODE = re.compile('[\U00010000-\U0010ffff]') except re.error: # UCS-2 build case REGEX_4BYTE_UNICODE = re.compile('[\uD800-\uDBFF][\uDC00-\uDFFF]') def no_4byte_params(f): """ Checks that no 4 byte unicode characters are allowed in dicts' keys/values and string's parameters """ def wrapper(*args, **kwargs): def _is_match(some_str): return ( isinstance(some_str, str) and REGEX_4BYTE_UNICODE.findall(some_str) != [] ) def _check_dict(data_dict): # a dict of dicts has to be checked recursively for key, value in data_dict.items(): if isinstance(value, dict): _check_dict(value) else: if _is_match(key): msg = _("Property names can't contain 4 byte unicode.") raise exception.Invalid(msg) if _is_match(value): msg = (_("%s can't contain 4 byte unicode characters.") % key.title()) raise exception.Invalid(msg) for data_dict in [arg for arg in args if isinstance(arg, dict)]: _check_dict(data_dict) # now check args for str values for arg in args: if _is_match(arg): msg = _("Param values can't contain 4 byte unicode.") raise exception.Invalid(msg) # check kwargs as well, as params are passed as kwargs via # registry calls _check_dict(kwargs) return f(*args, **kwargs) return wrapper def stash_conf_values(): """ Make a copy of some of the current global CONF's settings. Allows determining if any of these values have changed when the config is reloaded. """ conf = { 'bind_host': CONF.bind_host, 'bind_port': CONF.bind_port, 'backlog': CONF.backlog, } return conf def split_filter_op(expression): """Split operator from threshold in an expression. Designed for use on a comparative-filtering query field. When no operator is found, default to an equality comparison. :param expression: the expression to parse :returns: a tuple (operator, threshold) parsed from expression """ left, sep, right = expression.partition(':') if sep: # If the expression is a date of the format ISO 8601 like # CCYY-MM-DDThh:mm:ss+hh:mm and has no operator, it should # not be partitioned, and a default operator of eq should be # assumed. try: timeutils.parse_isotime(expression) op = 'eq' threshold = expression except ValueError: op = left threshold = right else: op = 'eq' # default operator threshold = left # NOTE stevelle decoding escaped values may be needed later return op, threshold def validate_quotes(value): """Validate filter values Validation opening/closing quotes in the expression. """ open_quotes = True for i in range(len(value)): if value[i] == '"': if i and value[i - 1] == '\\': continue if open_quotes: if i and value[i - 1] != ',': msg = _("Invalid filter value %s. There is no comma " "before opening quotation mark.") % value raise exception.InvalidParameterValue(message=msg) else: if i + 1 != len(value) and value[i + 1] != ",": msg = _("Invalid filter value %s. There is no comma " "after closing quotation mark.") % value raise exception.InvalidParameterValue(message=msg) open_quotes = not open_quotes if not open_quotes: msg = _("Invalid filter value %s. The quote is not closed.") % value raise exception.InvalidParameterValue(message=msg) def split_filter_value_for_quotes(value): """Split filter values Split values by commas and quotes for 'in' operator, according api-wg. """ validate_quotes(value) tmp = re.compile(r''' "( # if found a double-quote [^\"\\]* # take characters either non-quotes or backslashes (?:\\. 
# take backslashes and character after it [^\"\\]*)* # take characters either non-quotes or backslashes ) # before double-quote ",? # a double-quote with comma maybe | ([^,]+),? # if not found double-quote take any non-comma # characters with comma maybe | , # if we have only comma take empty string ''', re.VERBOSE) return [val[0] or val[1] for val in re.findall(tmp, value)] def evaluate_filter_op(value, operator, threshold): """Evaluate a comparison operator. Designed for use on a comparative-filtering query field. :param value: evaluated against the operator, as left side of expression :param operator: any supported filter operation :param threshold: to compare value against, as right side of expression :raises InvalidFilterOperatorValue: if an unknown operator is provided :returns: boolean result of applied comparison """ if operator == 'gt': return value > threshold elif operator == 'gte': return value >= threshold elif operator == 'lt': return value < threshold elif operator == 'lte': return value <= threshold elif operator == 'neq': return value != threshold elif operator == 'eq': return value == threshold msg = _("Unable to filter on a unknown operator.") raise exception.InvalidFilterOperatorValue(msg) def _get_available_stores(): available_stores = CONF.enabled_backends stores = [] # Remove reserved stores from the available stores list for store in available_stores: # NOTE (abhishekk): http store is readonly and should be # excluded from the list. if available_stores[store] == 'http': continue if store not in wsgi.RESERVED_STORES: stores.append(store) return stores def get_stores_from_request(req, body): """Processes a supplied request and extract stores from it :param req: request to process :param body: request body :raises glance_store.UnknownScheme: if a store is not valid :return: a list of stores """ if body.get('all_stores', False): if 'stores' in body or 'x-image-meta-store' in req.headers: msg = _("All_stores parameter can't be used with " "x-image-meta-store header or stores parameter") raise exc.HTTPBadRequest(explanation=msg) stores = _get_available_stores() else: try: stores = body['stores'] except KeyError: stores = [req.headers.get('x-image-meta-store', CONF.glance_store.default_backend)] else: if 'x-image-meta-store' in req.headers: msg = _("Stores parameter and x-image-meta-store header can't " "be both specified") raise exc.HTTPBadRequest(explanation=msg) # Validate each store for store in stores: glance_store.get_store_from_store_identifier(store) return stores def sort_image_locations(locations): if not CONF.enabled_backends: return locations def get_store_weight(location): store_id = location['metadata'].get('store') if not store_id: return 0 try: store = glance_store.get_store_from_store_identifier(store_id) except glance_store.exceptions.UnknownScheme: msg = (_LW("Unable to find store '%s', returning " "default weight '0'") % store_id) LOG.warning(msg) return 0 return store.weight if store is not None else 0 sorted_locations = sorted(locations, key=get_store_weight, reverse=True) LOG.debug(('Sorted locations: %s'), sorted_locations) return sorted_locations def is_http_store_configured(url): if not url.startswith("http"): return False enabled_backends = CONF.enabled_backends if enabled_backends: return 'http' in enabled_backends.values() else: return 'http' in CONF.glance_store.stores ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
glance-29.0.0/glance/common/wsgi.py0000664000175000017500000013504200000000000017127 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods for working with WSGI servers """ import abc import errno import functools import os import re import signal import struct import subprocess import sys import time from eventlet.green import socket import eventlet.greenio import eventlet.wsgi import glance_store from os_win import utilsfactory as os_win_utilsfactory from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import strutils from osprofiler import opts as profiler_opts import routes.middleware import webob.dec import webob.exc from webob import multidict from glance.common import config from glance.common import exception from glance.common import store_utils from glance.common import utils import glance.db from glance import housekeeping from glance import i18n from glance.i18n import _, _LE, _LI, _LW from glance import sqlite_migration bind_opts = [ cfg.HostAddressOpt('bind_host', default='0.0.0.0', help=_(""" IP address to bind the glance servers to. Provide an IP address to bind the glance server to. The default value is ``0.0.0.0``. Edit this option to enable the server to listen on one particular IP address on the network card. This facilitates selection of a particular network interface for the server. Possible values: * A valid IPv4 address * A valid IPv6 address Related options: * None """)), cfg.PortOpt('bind_port', help=_(""" Port number on which the server will listen. Provide a valid port number to bind the server's socket to. This port is then set to identify processes and forward network messages that arrive at the server. The default bind_port value for the API server is 9292 and for the registry server is 9191. Possible values: * A valid port number (0 to 65535) Related options: * None """)), ] socket_opts = [ cfg.IntOpt('backlog', default=4096, min=1, help=_(""" Set the number of incoming connection requests. Provide a positive integer value to limit the number of requests in the backlog queue. The default queue size is 4096. An incoming connection to a TCP listener socket is queued before a connection can be established with the server. Setting the backlog for a TCP socket ensures a limited queue size for incoming traffic. Possible values: * Positive integer Related options: * None """)), cfg.IntOpt('tcp_keepidle', default=600, min=1, help=_(""" Set the wait time before a connection recheck. Provide a positive integer value representing time in seconds which is set as the idle wait time before a TCP keep alive packet can be sent to the host. The default value is 600 seconds. 
Setting ``tcp_keepidle`` helps verify at regular intervals that a connection is intact and prevents frequent TCP connection reestablishment. Possible values: * Positive integer value representing time in seconds Related options: * None """)), ] eventlet_opts = [ cfg.IntOpt('workers', min=0, help=_(""" Number of Glance worker processes to start. Provide a non-negative integer value to set the number of child process workers to service requests. By default, the number of CPUs available is set as the value for ``workers``, limited to 8. For example, if the processor count is 6, 6 workers will be used; if the processor count is 24, only 8 workers will be used. The limit applies only to the default value; if 24 workers are configured, 24 will be used. Each worker process is made to listen on the port set in the configuration file and contains a greenthread pool of size 1000. NOTE: Setting the number of workers to zero triggers the creation of a single API process with a greenthread pool of size 1000. Possible values: * 0 * Positive integer value (typically equal to the number of CPUs) Related options: * None """)), cfg.IntOpt('max_header_line', default=16384, min=0, help=_(""" Maximum line size of message headers. Provide an integer value representing a length to limit the size of message headers. The default value is 16384. NOTE: ``max_header_line`` may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs). However, keep in mind that larger values for ``max_header_line`` may flood the logs. Setting ``max_header_line`` to 0 sets no limit for the line size of message headers. Possible values: * 0 * Positive integer Related options: * None """)), cfg.BoolOpt('http_keepalive', default=True, help=_(""" Set the keep-alive option for HTTP over TCP. Provide a boolean value to determine whether to send keep-alive packets. If set to ``False``, the server returns the header "Connection: close". If set to ``True``, the server returns a "Connection: Keep-Alive" in its responses. This enables retention of the same TCP connection for HTTP conversations instead of opening a new one with each new request. This option must be set to ``False`` if the client socket connection needs to be closed explicitly after the response is received and read successfully by the client. Possible values: * True * False Related options: * None """)), cfg.IntOpt('client_socket_timeout', default=900, min=0, help=_(""" Timeout for client connections' socket operations. Provide a valid integer value representing time in seconds to set the period of wait before an incoming connection can be closed. The default value is 900 seconds. The value zero implies wait forever. Possible values: * Zero * Positive integer Related options: * None """)), ] store_opts = [ cfg.DictOpt('enabled_backends', help=_('Key:Value pairs of store identifier and store type. ' 'Multiple backends should be separated ' 'using commas.')), ] cli_opts = [ cfg.StrOpt('pipe-handle', help='This argument is used internally on Windows. 
Glance ' 'passes a pipe handle to child processes, which is then ' 'used for inter-process communication.'), ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(bind_opts) CONF.register_opts(socket_opts) CONF.register_opts(eventlet_opts) CONF.register_opts(store_opts) profiler_opts.set_defaults(CONF) ASYNC_EVENTLET_THREAD_POOL_LIST = [] # Detect if we're running under the uwsgi server try: import uwsgi LOG.debug('Detected running under uwsgi') except ImportError: LOG.debug('Detected not running under uwsgi') uwsgi = None # Reserved file stores for staging and tasks operations RESERVED_STORES = { 'os_glance_staging_store': 'file', 'os_glance_tasks_store': 'file' } def register_cli_opts(): CONF.register_cli_opts(cli_opts) def get_num_workers(): """Return the configured number of workers.""" # Windows only: we're already running on the worker side. if os.name == 'nt' and getattr(CONF, 'pipe_handle', None): return 0 if CONF.workers is None: # None implies the number of CPUs limited to 8 # See Launchpad bug #1748916 and the config help text workers = processutils.get_worker_count() return workers if workers < 8 else 8 return CONF.workers def get_bind_addr(default_port=None): """Return the host and port to bind to.""" return (CONF.bind_host, CONF.bind_port or default_port) def get_socket(default_port): """ Bind a socket to the ip:port specified in conf. Note: mostly comes from Swift with a few small changes... :param default_port: port to bind to if none is specified in conf :returns: a socket object as returned from socket.listen """ bind_addr = get_bind_addr(default_port) # TODO(jaypipes): eventlet's greened socket module does not actually # support IPv6 in getaddrinfo(). We need to get around this in the # future or monitor upstream for a fix address_family = [ addr[0] for addr in socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6) ][0] sock = utils.get_test_suite_socket() retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=CONF.backlog, family=address_family) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s after" " trying for 30 seconds") % {'host': bind_addr[0], 'port': bind_addr[1]}) return sock def set_eventlet_hub(): try: eventlet.hubs.use_hub('poll') except Exception: try: eventlet.hubs.use_hub('selects') except Exception: msg = _("Neither the eventlet 'poll' nor 'selects' hub is " "available on this platform") raise exception.WorkerCreationFailure( reason=msg) def initialize_glance_store(): """Initialize glance store.""" glance_store.register_opts(CONF) glance_store.create_stores(CONF) glance_store.verify_default_store() def initialize_multi_store(): """Initialize glance multi store backends.""" glance_store.register_store_opts(CONF, reserved_stores=RESERVED_STORES) glance_store.create_multi_stores(CONF, reserved_stores=RESERVED_STORES) glance_store.verify_store() def get_asynchronous_eventlet_pool(size=1000): """Return an eventlet pool to the caller. Created pools are also stored in a global list so they can be waited on after a graceful-shutdown signal is received. 
:param size: eventlet pool size :returns: eventlet pool """ global ASYNC_EVENTLET_THREAD_POOL_LIST pool = eventlet.GreenPool(size=size) # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool) return pool class BaseServer(metaclass=abc.ABCMeta): """Server class to manage multiple WSGI sockets and applications. This class requires initialize_glance_store set to True if glance store needs to be initialized. """ def __init__(self, threads=1000, initialize_glance_store=False): os.umask(0o27) # ensure files are created with the correct privileges self._logger = logging.getLogger("eventlet.wsgi.server") self.threads = threads self.children = set() self.stale_children = set() self.running = True # NOTE(abhishek): Allows us to only re-initialize glance_store when # the API's configuration reloads. self.initialize_glance_store = initialize_glance_store @staticmethod def set_signal_handler(signal_name, handler): # Some signals may not be available on this platform. sig = getattr(signal, signal_name, None) if sig is not None: signal.signal(sig, handler) def hup(self, *args): """ Reloads configuration files with zero down time """ self.set_signal_handler("SIGHUP", signal.SIG_IGN) raise exception.SIGHUPInterrupt @abc.abstractmethod def kill_children(self, *args): pass @abc.abstractmethod def wait_on_children(self): pass @abc.abstractmethod def run_child(self): pass def reload(self): raise NotImplementedError() def start(self, application, default_port): """ Run a WSGI server with the given application. :param application: The application to be run in the WSGI server :param default_port: Port to bind to if none is specified in conf """ self.application = application self.default_port = default_port self.configure() # NOTE(abhishekk): This will raise RuntimeError if # worker_self_reference_url is not set in glance-api.conf sqlite_migration.migrate_if_required() self.start_wsgi() # NOTE(danms): This may raise GlanceException if the staging store is # not configured properly, which will be caught and printed by # cmd/api.py as an error message and abort startup. staging = housekeeping.staging_store_path() if not os.path.exists(staging) and CONF.enabled_import_methods: LOG.warning(_LW('Import methods are enabled but staging directory ' '%(path)s does not exist; Imports will fail!'), {'path': staging}) cleaner = housekeeping.StagingStoreCleaner(glance.db.get_api()) self.pool.spawn_n(cleaner.clean_orphaned_staging_residue) def start_wsgi(self): workers = get_num_workers() self.pool = self.create_pool() if workers == 0: # Useful for profiling, test, debug etc. 
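# NOTE: with workers == 0 the parent process serves requests itself from
# a single greenthread pool (the spawn below); with workers > 0 it only
# supervises, forking that many children via run_child().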
self.pool.spawn_n(self._single_run, self.application, self.sock) return else: LOG.info(_LI("Starting %d workers"), workers) self.set_signal_handler("SIGTERM", self.kill_children) self.set_signal_handler("SIGINT", self.kill_children) self.set_signal_handler("SIGHUP", self.hup) while len(self.children) < workers: self.run_child() def create_pool(self): return get_asynchronous_eventlet_pool(size=self.threads) def configure(self, old_conf=None, has_changed=None): """ Apply configuration settings :param old_conf: Cached old configuration settings (if any) :param has_changed: callable to determine if a parameter has changed """ eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line self.client_socket_timeout = CONF.client_socket_timeout or None if self.initialize_glance_store: if CONF.enabled_backends: if store_utils.check_reserved_stores(CONF.enabled_backends): msg = _("'os_glance_' prefix should not be used in " "enabled_backends config option. It is reserved " "for internal use only.") raise RuntimeError(msg) initialize_multi_store() else: initialize_glance_store() self.configure_socket(old_conf, has_changed) def wait(self): """Wait until all servers have completed running.""" try: if self.children: self.wait_on_children() else: self.pool.waitall() except KeyboardInterrupt: pass def run_server(self): """Run a WSGI server.""" if cfg.CONF.pydev_worker_debug_host: utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host, cfg.CONF.pydev_worker_debug_port) eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0" self.pool = self.create_pool() try: eventlet.wsgi.server(self.sock, self.application, log=self._logger, custom_pool=self.pool, debug=False, keepalive=CONF.http_keepalive, socket_timeout=self.client_socket_timeout) except socket.error as err: if err[0] != errno.EINVAL: raise # waiting on async pools if ASYNC_EVENTLET_THREAD_POOL_LIST: for pool in ASYNC_EVENTLET_THREAD_POOL_LIST: pool.waitall() # NOTE(abhishekk): Importing the cache_images API module just # in time to avoid partial initialization of wsgi module from glance.api.v2 import cached_images # noqa if cached_images.WORKER: # If we started a cache worker, signal it to exit # and wait until it does. cached_images.WORKER.terminate() def _single_run(self, application, sock): """Start a WSGI server in a new green thread.""" LOG.info(_LI("Starting single process server")) eventlet.wsgi.server(sock, application, custom_pool=self.pool, log=self._logger, debug=False, keepalive=CONF.http_keepalive, socket_timeout=self.client_socket_timeout) def configure_socket(self, old_conf=None, has_changed=None): """ Ensure a socket exists and is appropriately configured. This function is called on start up, and can also be called in the event of a configuration reload. When called for the first time a new socket is created. If reloading and either bind_host or bind_port have been changed the existing socket must be closed and a new socket opened (laws of physics). In all other cases (bind_host/bind_port have not changed) the existing socket is reused. :param old_conf: Cached old configuration settings (if any) :param has_changed: callable to determine if a parameter has changed """ # Do we need a fresh socket? 
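# NOTE: on first start old_conf is None; on a SIGHUP reload, has_changed
# is the functools.partial built in PosixServer.reload() that compares
# the values stashed by utils.stash_conf_values() against the freshly
# reloaded CONF.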
new_sock = (old_conf is None or ( has_changed('bind_host') or has_changed('bind_port'))) if new_sock: self._sock = None if old_conf is not None: self.sock.close() _sock = get_socket(self.default_port) _sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive _sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self.sock = _sock if new_sock or (old_conf is not None and has_changed('tcp_keepidle')): # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) if old_conf is not None and has_changed('backlog'): self.sock.listen(CONF.backlog) class PosixServer(BaseServer): def __init__(self, *args, **kwargs): super(PosixServer, self).__init__(*args, **kwargs) self.pgid = os.getpid() try: # NOTE(flaper87): Make sure this process # runs in its own process group. os.setpgid(self.pgid, self.pgid) except OSError: # NOTE(flaper87): When running glance-control, # (glance's functional tests, for example) # setpgid fails with EPERM as glance-control # creates a fresh session, of which the newly # launched service becomes the leader (session # leaders may not change process groups) # # Running glance-(api|registry) is safe and # shouldn't raise any error here. self.pgid = 0 def kill_children(self, *args): """Kills the entire process group.""" self.set_signal_handler("SIGTERM", signal.SIG_IGN) self.set_signal_handler("SIGINT", signal.SIG_IGN) self.set_signal_handler("SIGCHLD", signal.SIG_IGN) self.running = False os.killpg(self.pgid, signal.SIGTERM) def _remove_children(self, pid): if pid in self.children: self.children.remove(pid) LOG.info(_LI('Removed dead child %s'), pid) elif pid in self.stale_children: self.stale_children.remove(pid) LOG.info(_LI('Removed stale child %s'), pid) else: LOG.warning(_LW('Unrecognised child %s'), pid) def _verify_and_respawn_children(self, pid, status): if len(self.stale_children) == 0: LOG.debug('No stale children') if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: LOG.error(_LE('Not respawning child %d, cannot ' 'recover from termination'), pid) if not self.children and not self.stale_children: LOG.info( _LI('All workers have terminated. Exiting')) self.running = False else: if len(self.children) < get_num_workers(): self.run_child() def wait_on_children(self): while self.running: try: pid, status = os.wait() if os.WIFEXITED(status) or os.WIFSIGNALED(status): self._remove_children(pid) self._verify_and_respawn_children(pid, status) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise except KeyboardInterrupt: LOG.info(_LI('Caught keyboard interrupt. 
Exiting.')) break except exception.SIGHUPInterrupt: self.reload() continue eventlet.greenio.shutdown_safe(self.sock) self.sock.close() LOG.debug('Exited') def run_child(self): def child_hup(*args): """Shuts down the child process; existing requests are handled.""" self.set_signal_handler("SIGHUP", signal.SIG_IGN) eventlet.wsgi.is_accepting = False self.sock.close() pid = os.fork() if pid == 0: self.set_signal_handler("SIGHUP", child_hup) self.set_signal_handler("SIGTERM", signal.SIG_DFL) # ignore the interrupt signal to avoid a race whereby # a child worker receives the signal before the parent # and is respawned unnecessarily as a result self.set_signal_handler("SIGINT", signal.SIG_IGN) # The child has no need to stash the unwrapped # socket, and the reference prevents a clean # exit on sighup self._sock = None self.run_server() LOG.info(_LI('Child %d exiting normally'), os.getpid()) # self.pool.waitall() is now called in wsgi's server so # it's safe to exit here sys.exit(0) else: LOG.info(_LI('Started child %s'), pid) self.children.add(pid) def reload(self): """ Reload and re-apply configuration settings Existing child processes are sent a SIGHUP signal and will exit after completing existing requests. New child processes, which will have the updated configuration, are spawned. This prevents interruption to the service. """ def _has_changed(old, new, param): old = old.get(param) new = getattr(new, param) return (new != old) old_conf = utils.stash_conf_values() has_changed = functools.partial(_has_changed, old_conf, CONF) CONF.reload_config_files() os.killpg(self.pgid, signal.SIGHUP) self.stale_children = self.children self.children = set() # Ensure any logging config changes are picked up logging.setup(CONF, 'glance') config.set_config_defaults() self.configure(old_conf, has_changed) self.start_wsgi() class Win32ProcessLauncher(object): def __init__(self): self._processutils = os_win_utilsfactory.get_processutils() self._workers = [] self._worker_job_handles = [] def add_process(self, cmd): LOG.info("Starting subprocess: %s", cmd) worker = subprocess.Popen(cmd, close_fds=False) try: job_handle = self._processutils.kill_process_on_job_close( worker.pid) except Exception: LOG.exception("Could not associate child process " "with a job, killing it.") worker.kill() raise self._worker_job_handles.append(job_handle) self._workers.append(worker) return worker def wait(self): pids = [worker.pid for worker in self._workers] if pids: self._processutils.wait_for_multiple_processes(pids, wait_all=True) # By sleeping here, we allow signal handlers to be executed. time.sleep(0) class Win32Server(BaseServer): _py_script_re = re.compile(r'.*\.py\w?$') _sock = None def __init__(self, *args, **kwargs): LOG.warning("Support for Glance on Windows operating systems is " "deprecated.") super(Win32Server, self).__init__(*args, **kwargs) self._launcher = Win32ProcessLauncher() self._ioutils = os_win_utilsfactory.get_ioutils() def run_child(self): # We're passing copies of the socket through pipes. rfd, wfd = self._ioutils.create_pipe(inherit_handle=True) cmd = sys.argv + ['--pipe-handle=%s' % int(rfd)] # Recent setuptools versions will trim '-script.py' and '.exe' # extensions from sys.argv[0]. 
if self._py_script_re.match(sys.argv[0]): cmd = [sys.executable] + cmd worker = self._launcher.add_process(cmd) self._ioutils.close_handle(rfd) share_sock_buff = self._sock.share(worker.pid) self._ioutils.write_file( wfd, struct.pack('<I', len(share_sock_buff)), 4) self._ioutils.write_file( wfd, share_sock_buff, len(share_sock_buff)) self.children.add(worker.pid) self._ioutils.close_handle(wfd) # NOTE: the remainder of Win32Server (child-side socket inheritance and process supervision) was garbled in this archive and could not be recovered; the two write_file calls above and _UWSGIChunkFile.read below are a best-effort reconstruction. class _UWSGIChunkFile(object): def read(self, length=None): position = 0 if length == 0: return b"" if length and length < 0: length = None response = [] while True: data = uwsgi.chunked_read() # stop at end of input if not data: break response.append(data) # stop once `length` bytes have been read if length is not None: position += len(data) if position >= length: break return b''.join(response) class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, environ, *args, **kwargs): super(Request, self).__init__(environ, *args, **kwargs) @property def body_file(self): if uwsgi: if self.headers.get('transfer-encoding', '').lower() == 'chunked': return _UWSGIChunkFile() return super(Request, self).body_file @body_file.setter def body_file(self, value): # NOTE(cdent): If you have a property setter in a superclass, it will # not be inherited. webob.Request.body_file.fset(self, value) def best_match_content_type(self): """Determine the requested response content-type.""" supported = ('application/json',) best_matches = self.accept.acceptable_offers(supported) if not best_matches: return 'application/json' return best_matches[0][0] def get_content_type(self, allowed_content_types): """Determine content type of the request body.""" if "Content-Type" not in self.headers: raise exception.InvalidContentType(content_type=None) content_type = self.content_type if content_type not in allowed_content_types: raise exception.InvalidContentType(content_type=content_type) else: return content_type def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None langs = i18n.get_available_languages('glance') # NOTE(rosmaita): give the webob lookup() function a sentinel value # for default so we can preserve the behavior of this function as # indicated by the current unit tests. See Launchpad bug #1765748. best_match = self.accept_language.lookup(langs, default='fake_LANG') if best_match == 'fake_LANG': best_match = None return best_match def get_range_from_request(self, image_size): """Return the `Range` in a request.""" range_str = self.headers.get('Range') if range_str is not None: # NOTE(dharinic): We do not support multi range requests. if ',' in range_str: msg = ("Requests with multiple ranges are not supported in " "Glance. You may make multiple single-range requests " "instead.") raise webob.exc.HTTPBadRequest(explanation=msg) range_ = webob.byterange.Range.parse(range_str) if range_ is None: msg = ("Invalid Range header.") raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) # NOTE(dharinic): Ensure that a range like bytes=4- for an image # size of 3 is invalidated as per rfc7233. if range_.start >= image_size: msg = ("Invalid start position in Range header. " "Start position MUST be in the inclusive range [0, %s]." % (image_size - 1)) raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) return range_ # NOTE(dharinic): For backward compatibility reasons, we maintain # support for 'Content-Range' in requests even though it's not # correct to use it there. c_range_str = self.headers.get('Content-Range') if c_range_str is not None: content_range = webob.byterange.ContentRange.parse(c_range_str) # NOTE(dharinic): Ensure that a content range like 1-4/* for an # image size of 3 is invalidated. 
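# For example, webob parses 'bytes 1-4/*' as start=1, stop=5 (exclusive)
# with length=None, so for an image of size 3 it is rejected below
# because stop > image_size.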
if content_range is None: msg = ("Invalid Content-Range header.") raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) if (content_range.length is None and content_range.stop > image_size): msg = ("Invalid stop position in Content-Range header. " "The stop position MUST be in the inclusive range " "[0, %s]." % (image_size - 1)) raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) if content_range.start >= image_size: msg = ("Invalid start position in Content-Range header. " "Start position MUST be in the inclusive range [0, %s]." % (image_size - 1)) raise webob.exc.HTTPRequestRangeNotSatisfiable(msg) return content_range class JSONRequestDeserializer(object): valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate', 'gzip', 'identity']) httpverb_may_have_body = frozenset({'POST', 'PUT', 'PATCH'}) @classmethod def is_valid_encoding(cls, request): request_encoding = request.headers.get('transfer-encoding', '').lower() return request_encoding in cls.valid_transfer_encoding @classmethod def is_valid_method(cls, request): return request.method.upper() in cls.httpverb_may_have_body def has_body(self, request): """ Returns whether a Webob.Request object will possess an entity body. :param request: Webob.Request object """ if self.is_valid_encoding(request) and self.is_valid_method(request): request.is_body_readable = True return True if request.content_length is not None and request.content_length > 0: return True return False @staticmethod def _sanitizer(obj): """Sanitizer method that will be passed to jsonutils.loads.""" return obj def from_json(self, datastring): try: jsondata = jsonutils.loads(datastring, object_hook=self._sanitizer) if not isinstance(jsondata, (dict, list)): msg = _('Unexpected body type. Expected list/dict.') raise webob.exc.HTTPBadRequest(explanation=msg) return jsondata except ValueError: msg = _('Malformed JSON in request body.') raise webob.exc.HTTPBadRequest(explanation=msg) def default(self, request): if self.has_body(request): return {'body': self.from_json(request.body)} else: return {} class JSONResponseSerializer(object): def _sanitizer(self, obj): """Sanitizer method that will be passed to jsonutils.dumps.""" if hasattr(obj, "to_dict"): return obj.to_dict() if isinstance(obj, multidict.MultiDict): return obj.mixed() return jsonutils.to_primitive(obj) def to_json(self, data): return jsonutils.dump_as_bytes(data, default=self._sanitizer) def default(self, response, result): response.content_type = 'application/json' body = self.to_json(result) body = encodeutils.to_utf8(body) response.body = body def translate_exception(req, e): """Translates all translatable elements of the given exception.""" # The RequestClass attribute in the webob.dec.wsgify decorator # does not guarantee that the request object will be a particular # type; this check is therefore necessary. if not hasattr(req, "best_match_language"): return e locale = req.best_match_language() if isinstance(e, webob.exc.HTTPError): e.explanation = i18n.translate(e.explanation, locale) e.detail = i18n.translate(e.detail, locale) if getattr(e, 'body_template', None): e.body_template = i18n.translate(e.body_template, locale) return e class Resource(object): """ WSGI app that handles (de)serialization and controller dispatch. Reads routing information supplied by RoutesMiddleware and calls the requested action method upon its deserializer, controller, and serializer. 
Those three objects may implement any of the basic controller action methods (create, update, show, index, delete) along with any that may be specified in the api router. A 'default' method may also be implemented to be used in place of any non-implemented actions. Deserializer methods must accept a request argument and return a dictionary. Controller methods must accept a request argument. Additionally, they must also accept keyword arguments that represent the keys returned by the Deserializer. They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. """ def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib :param deserializer: object that supports webob request deserialization through controller-like actions :param serializer: object that supports webob response serialization through controller-like actions """ self.controller = controller self.serializer = serializer or JSONResponseSerializer() self.deserializer = deserializer or JSONRequestDeserializer() @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) body_reject = strutils.bool_from_string( action_args.pop('body_reject', None)) try: if body_reject and self.deserializer.has_body(request): msg = _('A body is not expected with this request.') raise webob.exc.HTTPBadRequest(explanation=msg) deserialized_request = self.dispatch(self.deserializer, action, request) action_args.update(deserialized_request) action_result = self.dispatch(self.controller, action, request, **action_args) except webob.exc.WSGIHTTPException as e: e = translate_exception(request, e) raise e.with_traceback(sys.exc_info()[2]) except UnicodeDecodeError: msg = _("Error decoding your request. 
Either the URL or the " "request body contained characters that could not be " "decoded by Glance") raise webob.exc.HTTPBadRequest(explanation=msg) except exception.InvalidPropertyProtectionConfiguration as e: LOG.exception(_LE("Caught error: %s"), encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e.msg) except Exception as e: LOG.exception(_LE("Caught error: %s"), encodeutils.exception_to_unicode(e)) response = webob.exc.HTTPInternalServerError() return response # We cannot serialize an Exception, so return the action_result if isinstance(action_result, Exception): return action_result try: response = webob.Response(request=request) self.dispatch(self.serializer, action, response, action_result) return response except webob.exc.WSGIHTTPException as e: return translate_exception(request, e) except webob.exc.HTTPException as e: return e # return unserializable result (typically a webob exc) except Exception: return action_result def dispatch(self, obj, action, *args, **kwargs): """Find action-specific method on self and call it.""" try: method = getattr(obj, action) except AttributeError: method = getattr(obj, 'default') return method(*args, **kwargs) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" try: args = request_environment['wsgiorg.routing_args'][1].copy() except Exception: return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/wsgi_app.py0000664000175000017500000001310000000000000017755 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
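# NOTE: a minimal sketch, not part of this file, showing how the Resource
# class from glance/common/wsgi.py above pairs a controller with the
# default JSON (de)serializers; PingController and the hand-rolled
# routing_args below are hypothetical stand-ins for the routes middleware.
from glance.common import wsgi


class PingController(object):
    def show(self, req):
        # A returned dict is serialized by JSONResponseSerializer.default()
        return {'status': 'pong'}


req = wsgi.Request.blank('/ping')
req.environ['wsgiorg.routing_args'] = ((), {'action': 'show'})
resp = req.get_response(wsgi.Resource(PingController()))
assert resp.content_type == 'application/json'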
import atexit import os import threading import glance_store from oslo_config import cfg from oslo_log import log as logging import osprofiler.initializer from glance.api import common import glance.async_ from glance.common import config from glance.common import store_utils from glance import housekeeping from glance.i18n import _, _LW from glance import notifier from glance import sqlite_migration CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") CONF.import_opt("enabled_backends", "glance.common.wsgi") logging.register_options(CONF) LOG = logging.getLogger(__name__) # Detect if we're running under the uwsgi server try: import uwsgi LOG.debug('Detected running under uwsgi') except ImportError: LOG.debug('Detected not running under uwsgi') uwsgi = None CONFIG_FILES = ['glance-image-import.conf', 'glance-api.conf'] # Reserved file stores for staging and tasks operations RESERVED_STORES = { 'os_glance_staging_store': 'file', 'os_glance_tasks_store': 'file' } def _get_config_files(env=None): if env is None: env = os.environ dirname = env.get('OS_GLANCE_CONFIG_DIR', '/etc/glance').strip() config_files = [] for config_file in CONFIG_FILES: cfg_file = os.path.join(dirname, config_file) # As 'glance-image-import.conf' is optional conf file # so include it only if it's existing. if config_file == 'glance-image-import.conf' and ( not os.path.exists(cfg_file)): continue config_files.append(cfg_file) return config_files def _setup_os_profiler(): notifier.set_defaults() if CONF.profiler.enabled: osprofiler.initializer.init_from_conf(conf=CONF, context={}, project='glance', service='api', host=CONF.bind_host) def drain_workers(): # NOTE(danms): If there are any other named pools that we need to # drain before exit, they should be in this list. pools_to_drain = ['tasks_pool'] for pool_name in pools_to_drain: pool_model = common.get_thread_pool(pool_name) LOG.info('Waiting for remaining threads in pool %r', pool_name) pool_model.pool.shutdown() from glance.api.v2 import cached_images # noqa if cached_images.WORKER: # If we started a cache worker, signal it to exit # and wait until it does. cached_images.WORKER.terminate() def run_staging_cleanup(): cleaner = housekeeping.StagingStoreCleaner(glance.db.get_api()) # NOTE(danms): Start thread as a daemon. It is still a # single-shot, but this will not block our shutdown if it is # running. cleanup_thread = threading.Thread( target=cleaner.clean_orphaned_staging_residue, daemon=True) cleanup_thread.start() def init_app(): config.set_config_defaults() config_files = _get_config_files() CONF([], project='glance', default_config_files=config_files) logging.setup(CONF, "glance") # NOTE(danms): We are running inside uwsgi or mod_wsgi, so no eventlet; # use native threading instead. glance.async_.set_threadpool_model('native') if uwsgi: uwsgi.atexit = drain_workers else: atexit.register(drain_workers) # NOTE(danms): Change the default threadpool size since we # are dealing with native threads and not greenthreads. # Right now, the only pool of default size is tasks_pool, # so if others are created this will need to change to be # more specific. common.DEFAULT_POOL_SIZE = CONF.wsgi.task_pool_threads if CONF.enabled_backends: if store_utils.check_reserved_stores(CONF.enabled_backends): msg = _("'os_glance_' prefix should not be used in " "enabled_backends config option. 
It is reserved " "for internal use only.") raise RuntimeError(msg) glance_store.register_store_opts(CONF, reserved_stores=RESERVED_STORES) glance_store.create_multi_stores(CONF, reserved_stores=RESERVED_STORES) glance_store.verify_store() else: glance_store.register_opts(CONF) glance_store.create_stores(CONF) glance_store.verify_default_store() # NOTE(abhishekk): This will raise RuntimeError if # worker_self_reference_url is not set in glance-api.conf sqlite_migration.migrate_if_required() # NOTE(danms): This may raise GlanceException if the staging store is # not configured properly, which will bubble up to the WSGI server, # aborting application load as desired. staging = housekeeping.staging_store_path() if not os.path.exists(staging) and CONF.enabled_import_methods: LOG.warning(_LW('Import methods are enabled but staging directory ' '%(path)s does not exist; Imports will fail!'), {'path': staging}) run_staging_cleanup() _setup_os_profiler() return config.load_paste_app('glance-api') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/common/wsme_utils.py0000664000175000017500000000443000000000000020345 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime from wsme import types as wsme_types from glance.common import timeutils class WSMEModelTransformer(object): def to_dict(self): # Return the wsme_attributes names:values as a dict my_dict = {} for attribute in self._wsme_attributes: value = getattr(self, attribute.name) if value is not wsme_types.Unset: my_dict.update({attribute.name: value}) return my_dict @classmethod def to_wsme_model(model, db_entity, self_link=None, schema=None): # Return the wsme_attributes names:values as a dict names = [] for attribute in model._wsme_attributes: names.append(attribute.name) values = {} for name in names: value = getattr(db_entity, name, None) if value is not None: if isinstance(value, datetime): iso_datetime_value = timeutils.isotime(value) values.update({name: iso_datetime_value}) else: values.update({name: value}) if schema: values['schema'] = schema model_object = model(**values) # 'self' kwarg is used in wsme.types.Base.__init__(self, ..) and # conflicts during initialization. self_link is a proxy field to self. if self_link: model_object.self = self_link return model_object @classmethod def get_mandatory_attrs(cls): return [attr.name for attr in cls._wsme_attributes if attr.mandatory] def _get_value(obj): if obj is not wsme_types.Unset: return obj else: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/context.py0000664000175000017500000000706400000000000016354 0ustar00zuulzuul00000000000000# Copyright 2011-2014 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from keystoneauth1 import session from keystoneauth1 import token_endpoint from oslo_config import cfg from oslo_context import context from glance.api import policy CONF = cfg.CONF def get_ksa_client(context): """Returns a keystoneauth Adapter using token from context. This will return a simple keystoneauth adapter that can be used to make requests against a remote service using the token provided (and already authenticated) from the user and stored in a RequestContext. :param context: User request context :returns: keystoneauth1 Adapter object """ auth = token_endpoint.Token(CONF.keystone_authtoken.identity_uri, context.auth_token) return session.Session(auth=auth) class RequestContext(context.RequestContext): """Stores information about the security context. Stores how the user accesses the system, as well as additional request information. """ def __init__(self, service_catalog=None, policy_enforcer=None, **kwargs): # TODO(mriedem): Remove usage of user and tenant from old tests. if 'tenant' in kwargs: # Prefer project_id if passed, otherwise alias tenant as project_id tenant = kwargs.pop('tenant') kwargs['project_id'] = kwargs.get('project_id', tenant) if 'user' in kwargs: # Prefer user_id if passed, otherwise alias user as user_id user = kwargs.pop('user') kwargs['user_id'] = kwargs.get('user_id', user) super(RequestContext, self).__init__(**kwargs) self.service_catalog = service_catalog self.policy_enforcer = policy_enforcer or policy.Enforcer() if not self.is_admin: self.is_admin = self.policy_enforcer.check_is_admin(self) def to_dict(self): d = super(RequestContext, self).to_dict() d.update({ 'roles': self.roles, 'service_catalog': self.service_catalog, }) return d @classmethod def from_dict(cls, values): return cls(**values) @property def owner(self): """Return the owner to correlate with an image.""" return self.project_id @property def can_see_deleted(self): """Admins can see deleted by default""" return self.show_deleted or self.is_admin def elevated(self): """Return a copy of this context with admin flag set.""" context = copy.copy(self) context.roles = copy.deepcopy(self.roles) if 'admin' not in context.roles: context.roles.append('admin') context.is_admin = True return context def get_admin_context(show_deleted=False): """Create an administrator context.""" return RequestContext(auth_token=None, project_id=None, is_admin=True, show_deleted=show_deleted, overwrite=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8623035 glance-29.0.0/glance/db/0000775000175000017500000000000000000000000014674 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/__init__.py0000664000175000017500000010505600000000000017014 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space 
Administration. # Copyright 2010-2012 OpenStack Foundation # Copyright 2013 IBM Corp. # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import importutils from wsme.rest import json from glance.api.v2.model.metadef_property_type import PropertyType from glance.common import crypt from glance.common import exception from glance.common import utils as common_utils import glance.domain import glance.domain.proxy from glance.i18n import _ CONF = cfg.CONF CONF.import_opt('image_size_cap', 'glance.common.config') CONF.import_opt('metadata_encryption_key', 'glance.common.config') def get_api(): api = importutils.import_module('glance.db.sqlalchemy.api') if hasattr(api, 'configure'): api.configure() return api def unwrap(db_api): return db_api # attributes common to all models BASE_MODEL_ATTRS = set(['id', 'created_at', 'updated_at', 'deleted_at', 'deleted']) IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size', 'virtual_size', 'disk_format', 'container_format', 'min_disk', 'min_ram', 'is_public', 'locations', 'checksum', 'owner', 'protected']) IMAGE_ATOMIC_PROPS = set(['os_glance_import_task']) class ImageRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api def get(self, image_id): try: db_api_image = dict(self.db_api.image_get(self.context, image_id)) if db_api_image['deleted']: raise exception.ImageNotFound() except (exception.ImageNotFound, exception.Forbidden): msg = _("No image found with ID %s") % image_id raise exception.ImageNotFound(msg) tags = self.db_api.image_tag_get_all(self.context, image_id) image = self._format_image_from_db(db_api_image, tags) return ImageProxy(image, self.context, self.db_api) def list(self, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None, member_status='accepted'): sort_key = ['created_at'] if not sort_key else sort_key sort_dir = ['desc'] if not sort_dir else sort_dir db_api_images = self.db_api.image_get_all( self.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, member_status=member_status, return_tag=True) images = [] for db_api_image in db_api_images: db_image = dict(db_api_image) image = self._format_image_from_db(db_image, db_image['tags']) images.append(image) return images def _format_image_from_db(self, db_image, db_tags): properties = {} for prop in db_image.pop('properties'): # NOTE(markwash) db api requires us to filter deleted if not prop['deleted']: properties[prop['name']] = prop['value'] locations = [loc for loc in db_image['locations'] if loc['status'] == 'active'] if CONF.metadata_encryption_key: key = CONF.metadata_encryption_key for location in locations: location['url'] = crypt.urlsafe_decrypt(key, location['url']) # NOTE(danms): If the image is shared and we are not the # owner, we must have found it because we are a member. Set # our tenant on the image as 'member' for policy checks in the # upper layers. 
For any other image stage, we found the image # some other way, so leave member=None. if (db_image['visibility'] == 'shared' and self.context.owner != db_image['owner']): member = self.context.owner else: member = None return glance.domain.Image( image_id=db_image['id'], name=db_image['name'], status=db_image['status'], created_at=db_image['created_at'], updated_at=db_image['updated_at'], visibility=db_image['visibility'], min_disk=db_image['min_disk'], min_ram=db_image['min_ram'], protected=db_image['protected'], locations=common_utils.sort_image_locations(locations), checksum=db_image['checksum'], os_hash_algo=db_image['os_hash_algo'], os_hash_value=db_image['os_hash_value'], owner=db_image['owner'], disk_format=db_image['disk_format'], container_format=db_image['container_format'], size=db_image['size'], virtual_size=db_image['virtual_size'], extra_properties=properties, tags=db_tags, os_hidden=db_image['os_hidden'], member=member, ) def _format_image_to_db(self, image): locations = image.locations if CONF.metadata_encryption_key: key = CONF.metadata_encryption_key ld = [] for loc in locations: url = crypt.urlsafe_encrypt(key, loc['url']) ld.append({'url': url, 'metadata': loc['metadata'], 'status': loc['status'], # NOTE(zhiyan): New location has no ID field. 'id': loc.get('id')}) locations = ld return { 'id': image.image_id, 'name': image.name, 'status': image.status, 'created_at': image.created_at, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'protected': image.protected, 'locations': locations, 'checksum': image.checksum, 'os_hash_algo': image.os_hash_algo, 'os_hash_value': image.os_hash_value, 'owner': image.owner, 'disk_format': image.disk_format, 'container_format': image.container_format, 'size': image.size, 'virtual_size': image.virtual_size, 'visibility': image.visibility, 'properties': dict(image.extra_properties), 'os_hidden': image.os_hidden } def add(self, image): image_values = self._format_image_to_db(image) if (image_values['size'] is not None and image_values['size'] > CONF.image_size_cap): raise exception.ImageSizeLimitExceeded # the updated_at value is not set in the _format_image_to_db # function since it is specific to image create image_values['updated_at'] = image.updated_at new_values = self.db_api.image_create(self.context, image_values) self.db_api.image_tag_set_all(self.context, image.image_id, image.tags) image.created_at = new_values['created_at'] image.updated_at = new_values['updated_at'] def save(self, image, from_state=None): image_values = self._format_image_to_db(image) if (image_values['size'] is not None and image_values['size'] > CONF.image_size_cap): raise exception.ImageSizeLimitExceeded new_values = self.db_api.image_update(self.context, image.image_id, image_values, purge_props=True, from_state=from_state, atomic_props=( IMAGE_ATOMIC_PROPS)) self.db_api.image_tag_set_all(self.context, image.image_id, image.tags) image.updated_at = new_values['updated_at'] def remove(self, image): try: self.db_api.image_update(self.context, image.image_id, {'status': image.status}, purge_props=True) except (exception.ImageNotFound, exception.Forbidden): msg = _("No image found with ID %s") % image.image_id raise exception.ImageNotFound(msg) # NOTE(markwash): don't update tags? 
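# NOTE: image_destroy performs a soft delete: the row is kept with its
# 'deleted' flag set, which is why get() above raises ImageNotFound when
# db_api_image['deleted'] is True.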
new_values = self.db_api.image_destroy(self.context, image.image_id) image.updated_at = new_values['updated_at'] def set_property_atomic(self, image, name, value): self.db_api.image_set_property_atomic( image.image_id, name, value) def delete_property_atomic(self, image, name, value): self.db_api.image_delete_property_atomic( image.image_id, name, value) class ImageProxy(glance.domain.proxy.Image): def __init__(self, image, context, db_api): self.context = context self.db_api = db_api self.image = image super(ImageProxy, self).__init__(image) class ImageMemberRepo(object): def __init__(self, context, db_api, image): self.context = context self.db_api = db_api self.image = image def _format_image_member_from_db(self, db_image_member): return glance.domain.ImageMembership( id=db_image_member['id'], image_id=db_image_member['image_id'], member_id=db_image_member['member'], status=db_image_member['status'], created_at=db_image_member['created_at'], updated_at=db_image_member['updated_at'] ) def _format_image_member_to_db(self, image_member): image_member = {'image_id': self.image.image_id, 'member': image_member.member_id, 'status': image_member.status, 'created_at': image_member.created_at} return image_member def list(self): db_members = self.db_api.image_member_find( self.context, image_id=self.image.image_id) image_members = [] for db_member in db_members: image_members.append(self._format_image_member_from_db(db_member)) return image_members def add(self, image_member): try: self.get(image_member.member_id) except exception.NotFound: pass else: msg = _('The target member %(member_id)s is already ' 'associated with image %(image_id)s.') % { 'member_id': image_member.member_id, 'image_id': self.image.image_id} raise exception.Duplicate(msg) image_member_values = self._format_image_member_to_db(image_member) # Note(shalq): find the image member including the member marked with # deleted. We will use only one record to represent membership between # the same image and member. The record of the deleted image member # will be reused, if it exists, update its properties instead of # creating a new one. 
members = self.db_api.image_member_find(self.context, image_id=self.image.image_id, member=image_member.member_id, include_deleted=True) if members: new_values = self.db_api.image_member_update(self.context, members[0]['id'], image_member_values) else: new_values = self.db_api.image_member_create(self.context, image_member_values) image_member.created_at = new_values['created_at'] image_member.updated_at = new_values['updated_at'] image_member.id = new_values['id'] def remove(self, image_member): try: self.db_api.image_member_delete(self.context, image_member.id) except (exception.NotFound, exception.Forbidden): msg = _("The specified member %s could not be found") raise exception.NotFound(msg % image_member.id) def save(self, image_member, from_state=None): image_member_values = self._format_image_member_to_db(image_member) try: new_values = self.db_api.image_member_update(self.context, image_member.id, image_member_values) except (exception.NotFound, exception.Forbidden): raise exception.NotFound() image_member.updated_at = new_values['updated_at'] def get(self, member_id): try: db_api_image_member = self.db_api.image_member_find( self.context, self.image.image_id, member_id) if not db_api_image_member: raise exception.NotFound() except (exception.NotFound, exception.Forbidden): raise exception.NotFound() image_member = self._format_image_member_from_db( db_api_image_member[0]) return image_member class TaskRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api def _format_task_from_db(self, db_task): return glance.domain.Task( task_id=db_task['id'], task_type=db_task['type'], status=db_task['status'], owner=db_task['owner'], expires_at=db_task['expires_at'], created_at=db_task['created_at'], updated_at=db_task['updated_at'], task_input=db_task['input'], result=db_task['result'], message=db_task['message'], image_id=db_task['image_id'], user_id=db_task['user_id'], request_id=db_task['request_id'], ) def _format_task_stub_from_db(self, db_task): return glance.domain.TaskStub( task_id=db_task['id'], task_type=db_task['type'], status=db_task['status'], owner=db_task['owner'], expires_at=db_task['expires_at'], created_at=db_task['created_at'], updated_at=db_task['updated_at'], image_id=db_task['image_id'], user_id=db_task['user_id'], request_id=db_task['request_id'], ) def _format_task_to_db(self, task): task = {'id': task.task_id, 'type': task.type, 'status': task.status, 'input': task.task_input, 'result': task.result, 'owner': task.owner, 'message': task.message, 'expires_at': task.expires_at, 'created_at': task.created_at, 'updated_at': task.updated_at, 'image_id': task.image_id, 'request_id': task.request_id, 'user_id': task.user_id, } return task def get(self, task_id): try: db_api_task = self.db_api.task_get(self.context, task_id) except (exception.NotFound, exception.Forbidden): msg = _('Could not find task %s') % task_id raise exception.NotFound(msg) return self._format_task_from_db(db_api_task) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): db_api_tasks = self.db_api.task_get_all(self.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) return [self._format_task_stub_from_db(task) for task in db_api_tasks] def save(self, task): task_values = self._format_task_to_db(task) try: updated_values = self.db_api.task_update(self.context, task.task_id, task_values) except (exception.NotFound, exception.Forbidden): msg = _('Could not find task %s') % task.task_id 
raise exception.NotFound(msg) task.updated_at = updated_values['updated_at'] def add(self, task): task_values = self._format_task_to_db(task) updated_values = self.db_api.task_create(self.context, task_values) task.created_at = updated_values['created_at'] task.updated_at = updated_values['updated_at'] def remove(self, task): task_values = self._format_task_to_db(task) try: self.db_api.task_update(self.context, task.task_id, task_values) updated_values = self.db_api.task_delete(self.context, task.task_id) except (exception.NotFound, exception.Forbidden): msg = _('Could not find task %s') % task.task_id raise exception.NotFound(msg) task.updated_at = updated_values['updated_at'] task.deleted_at = updated_values['deleted_at'] class MetadefNamespaceRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api def _format_namespace_from_db(self, namespace_obj): return glance.domain.MetadefNamespace( namespace_id=namespace_obj['id'], namespace=namespace_obj['namespace'], display_name=namespace_obj['display_name'], description=namespace_obj['description'], owner=namespace_obj['owner'], visibility=namespace_obj['visibility'], protected=namespace_obj['protected'], created_at=namespace_obj['created_at'], updated_at=namespace_obj['updated_at'] ) def _format_namespace_to_db(self, namespace_obj): namespace = { 'namespace': namespace_obj.namespace, 'display_name': namespace_obj.display_name, 'description': namespace_obj.description, 'visibility': namespace_obj.visibility, 'protected': namespace_obj.protected, 'owner': namespace_obj.owner } return namespace def add(self, namespace): self.db_api.metadef_namespace_create( self.context, self._format_namespace_to_db(namespace) ) def get(self, namespace): db_api_namespace = self.db_api.metadef_namespace_get( self.context, namespace) return self._format_namespace_from_db(db_api_namespace) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): db_namespaces = self.db_api.metadef_namespace_get_all( self.context, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters ) return [self._format_namespace_from_db(namespace_obj) for namespace_obj in db_namespaces] def remove(self, namespace): try: self.db_api.metadef_namespace_delete(self.context, namespace.namespace) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def remove_objects(self, namespace): try: self.db_api.metadef_object_delete_namespace_content( self.context, namespace.namespace ) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def remove_properties(self, namespace): try: self.db_api.metadef_property_delete_namespace_content( self.context, namespace.namespace ) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def remove_tags(self, namespace): try: self.db_api.metadef_tag_delete_namespace_content( self.context, namespace.namespace ) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def object_count(self, namespace_name): return self.db_api.metadef_object_count( self.context, namespace_name ) def property_count(self, namespace_name): return self.db_api.metadef_property_count( 
self.context, namespace_name ) def save(self, namespace): try: self.db_api.metadef_namespace_update( self.context, namespace.namespace_id, self._format_namespace_to_db(namespace) ) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return namespace class MetadefObjectRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_metadef_object_from_db(self, metadata_object, namespace_entity): required_str = metadata_object['required'] required_list = required_str.split(",") if required_str else [] # Convert the persisted json schema to a dict of PropertyTypes property_types = {} json_props = metadata_object['json_schema'] for id in json_props: property_types[id] = json.fromjson(PropertyType, json_props[id]) return glance.domain.MetadefObject( namespace=namespace_entity, object_id=metadata_object['id'], name=metadata_object['name'], required=required_list, description=metadata_object['description'], properties=property_types, created_at=metadata_object['created_at'], updated_at=metadata_object['updated_at'] ) def _format_metadef_object_to_db(self, metadata_object): required_str = (",".join(metadata_object.required) if metadata_object.required else None) # Convert the model PropertyTypes dict to a JSON string properties = metadata_object.properties db_schema = {} if properties: for k, v in properties.items(): json_data = json.tojson(PropertyType, v) db_schema[k] = json_data db_metadata_object = { 'name': metadata_object.name, 'required': required_str, 'description': metadata_object.description, 'json_schema': db_schema } return db_metadata_object def add(self, metadata_object): self.db_api.metadef_object_create( self.context, metadata_object.namespace, self._format_metadef_object_to_db(metadata_object) ) def get(self, namespace, object_name): try: namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_object = self.db_api.metadef_object_get( self.context, namespace, object_name) except (exception.NotFound, exception.Forbidden): msg = _('Could not find metadata object %s') % object_name raise exception.NotFound(msg) return self._format_metadef_object_from_db(db_metadata_object, namespace_entity) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): namespace = filters['namespace'] namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_objects = self.db_api.metadef_object_get_all( self.context, namespace) return [self._format_metadef_object_from_db(metadata_object, namespace_entity) for metadata_object in db_metadata_objects] def remove(self, metadata_object): try: self.db_api.metadef_object_delete( self.context, metadata_object.namespace.namespace, metadata_object.name ) except (exception.NotFound, exception.Forbidden): msg = _("The specified metadata object %s could not be found") raise exception.NotFound(msg % metadata_object.name) def save(self, metadata_object): try: self.db_api.metadef_object_update( self.context, metadata_object.namespace.namespace, metadata_object.object_id, self._format_metadef_object_to_db(metadata_object)) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return metadata_object class MetadefResourceTypeRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_resource_type_from_db(self, resource_type, namespace): return 
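# NOTE: A minimal sketch (not part of this module) of the per-property
# schema serialization _format_metadef_object_to_db()/_from_db() above
# perform; the stdlib 'json' stands in for the module's tojson/fromjson
# helpers, and the schema content here is made up for illustration.
import json as _json

properties = {'ram_mb': {'type': 'integer', 'minimum': 0}}
db_schema = {k: _json.dumps(v) for k, v in properties.items()}
restored = {k: _json.loads(v) for k, v in db_schema.items()}
assert restored == properties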
glance.domain.MetadefResourceType( namespace=namespace, name=resource_type['name'], prefix=resource_type['prefix'], properties_target=resource_type['properties_target'], created_at=resource_type['created_at'], updated_at=resource_type['updated_at'] ) def _format_resource_type_to_db(self, resource_type): db_resource_type = { 'name': resource_type.name, 'prefix': resource_type.prefix, 'properties_target': resource_type.properties_target } return db_resource_type def add(self, resource_type): self.db_api.metadef_resource_type_association_create( self.context, resource_type.namespace, self._format_resource_type_to_db(resource_type) ) def get(self, resource_type, namespace): namespace_entity = self.meta_namespace_repo.get(namespace) db_resource_type = ( self.db_api. metadef_resource_type_association_get( self.context, namespace, resource_type ) ) return self._format_resource_type_from_db(db_resource_type, namespace_entity) def list(self, filters=None): namespace = filters['namespace'] if namespace: namespace_entity = self.meta_namespace_repo.get(namespace) db_resource_types = ( self.db_api. metadef_resource_type_association_get_all_by_namespace( self.context, namespace ) ) return [self._format_resource_type_from_db(resource_type, namespace_entity) for resource_type in db_resource_types] else: db_resource_types = ( self.db_api. metadef_resource_type_get_all(self.context) ) return [glance.domain.MetadefResourceType( namespace=None, name=resource_type['name'], prefix=None, properties_target=None, created_at=resource_type['created_at'], updated_at=resource_type['updated_at'] ) for resource_type in db_resource_types] def remove(self, resource_type): try: self.db_api.metadef_resource_type_association_delete( self.context, resource_type.namespace.namespace, resource_type.name) except (exception.NotFound, exception.Forbidden): msg = _("The specified resource type %s could not be found ") raise exception.NotFound(msg % resource_type.name) class MetadefPropertyRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_metadef_property_from_db( self, property, namespace_entity): return glance.domain.MetadefProperty( namespace=namespace_entity, property_id=property['id'], name=property['name'], schema=property['json_schema'] ) def _format_metadef_property_to_db(self, property): db_metadata_object = { 'name': property.name, 'json_schema': property.schema } return db_metadata_object def add(self, property): self.db_api.metadef_property_create( self.context, property.namespace, self._format_metadef_property_to_db(property) ) def get(self, namespace, property_name): try: namespace_entity = self.meta_namespace_repo.get(namespace) db_property_type = self.db_api.metadef_property_get( self.context, namespace, property_name ) except (exception.NotFound, exception.Forbidden): msg = _('Could not find property %s') % property_name raise exception.NotFound(msg) return self._format_metadef_property_from_db( db_property_type, namespace_entity) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): namespace = filters['namespace'] namespace_entity = self.meta_namespace_repo.get(namespace) db_properties = self.db_api.metadef_property_get_all( self.context, namespace) return ( [self._format_metadef_property_from_db( property, namespace_entity) for property in db_properties] ) def remove(self, property): try: self.db_api.metadef_property_delete( self.context, 
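# NOTE: A minimal sketch (hypothetical, not part of this module) of the
# branch in MetadefResourceTypeRepo.list() above: with a 'namespace'
# filter it returns namespace-scoped associations, without one it falls
# back to the bare global resource-type list.
def list_resource_types(associations, types, namespace=None):
    if namespace:
        return [a for a in associations if a['namespace'] == namespace]
    return [{'name': t['name']} for t in types]

assert list_resource_types([], [{'name': 'OS::Nova::Server'}]) == \
    [{'name': 'OS::Nova::Server'}]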
property.namespace.namespace, property.name) except (exception.NotFound, exception.Forbidden): msg = _("The specified property %s could not be found") raise exception.NotFound(msg % property.name) def save(self, property): try: self.db_api.metadef_property_update( self.context, property.namespace.namespace, property.property_id, self._format_metadef_property_to_db(property) ) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return property class MetadefTagRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_metadef_tag_from_db(self, metadata_tag, namespace_entity): return glance.domain.MetadefTag( namespace=namespace_entity, tag_id=metadata_tag['id'], name=metadata_tag['name'], created_at=metadata_tag['created_at'], updated_at=metadata_tag['updated_at'] ) def _format_metadef_tag_to_db(self, metadata_tag): db_metadata_tag = { 'name': metadata_tag.name } return db_metadata_tag def add(self, metadata_tag): self.db_api.metadef_tag_create( self.context, metadata_tag.namespace, self._format_metadef_tag_to_db(metadata_tag) ) def add_tags(self, metadata_tags, can_append=False): tag_list = [] namespace = None for metadata_tag in metadata_tags: tag_list.append(self._format_metadef_tag_to_db(metadata_tag)) if namespace is None: namespace = metadata_tag.namespace self.db_api.metadef_tag_create_tags( self.context, namespace, tag_list, can_append) def get(self, namespace, name): try: namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_tag = self.db_api.metadef_tag_get( self.context, namespace, name) except (exception.NotFound, exception.Forbidden): msg = _('Could not find metadata tag %s') % name raise exception.NotFound(msg) return self._format_metadef_tag_from_db(db_metadata_tag, namespace_entity) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): namespace = filters['namespace'] namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_tag = self.db_api.metadef_tag_get_all( self.context, namespace, filters, marker, limit, sort_key, sort_dir) return [self._format_metadef_tag_from_db(metadata_tag, namespace_entity) for metadata_tag in db_metadata_tag] def remove(self, metadata_tag): try: self.db_api.metadef_tag_delete( self.context, metadata_tag.namespace.namespace, metadata_tag.name ) except (exception.NotFound, exception.Forbidden): msg = _("The specified metadata tag %s could not be found") raise exception.NotFound(msg % metadata_tag.name) def save(self, metadata_tag): try: self.db_api.metadef_tag_update( self.context, metadata_tag.namespace.namespace, metadata_tag.tag_id, self._format_metadef_tag_to_db(metadata_tag)) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return metadata_tag ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/metadata.py0000664000175000017500000000411200000000000017024 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2013 OpenStack Foundation # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Metadata setup commands.""" import threading from oslo_config import cfg from oslo_db import options as db_options from stevedore import driver from glance.db.sqlalchemy import api as db_api _IMPL = None _LOCK = threading.Lock() db_options.set_defaults(cfg.CONF) def get_backend(): global _IMPL if _IMPL is None: with _LOCK: if _IMPL is None: _IMPL = driver.DriverManager( "glance.database.metadata_backend", cfg.CONF.database.backend).driver return _IMPL def load_metadefs(): """Read metadefinition files and insert data into the database""" return get_backend().db_load_metadefs(engine=db_api.get_engine(), metadata_path=None, merge=False, prefer_new=False, overwrite=False) def unload_metadefs(): """Unload metadefinitions from database""" return get_backend().db_unload_metadefs(engine=db_api.get_engine()) def export_metadefs(): """Export metadefinitions from database to files""" return get_backend().db_export_metadefs(engine=db_api.get_engine(), metadata_path=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/migration.py0000664000175000017500000000211100000000000017232 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" from oslo_config import cfg from oslo_db import options as db_options db_options.set_defaults(cfg.CONF) # Migration-related constants EXPAND_BRANCH = 'expand' CONTRACT_BRANCH = 'contract' CURRENT_RELEASE = '2024_2' ALEMBIC_INIT_VERSION = 'liberty' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8623035 glance-29.0.0/glance/db/simple/0000775000175000017500000000000000000000000016165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/simple/__init__.py0000664000175000017500000000023100000000000020272 0ustar00zuulzuul00000000000000# flake8: noqa # Note(jokke): SimpleDB is only used for unittests and #noqa # has not been supported in production since moving # to alembic migrations. 
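# NOTE: A minimal, self-contained sketch (not part of this module) of the
# double-checked locking idiom get_backend() above uses: the unlocked
# fast path avoids contention once the driver is cached, while the second
# check inside the lock keeps two racing threads from both loading it.
import threading

_impl_example = None
_lock_example = threading.Lock()

def get_impl(loader):
    global _impl_example
    if _impl_example is None:          # fast path, no lock taken
        with _lock_example:
            if _impl_example is None:  # re-check: another thread may have won
                _impl_example = loader()
    return _impl_example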
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/simple/api.py0000664000175000017500000022271100000000000017315 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack, Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import functools import uuid from oslo_config import cfg from oslo_log import log as logging from glance.common import exception from glance.common import timeutils from glance.common import utils from glance.db import utils as db_utils from glance.i18n import _, _LI, _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) DATA = { 'cached_images': {}, 'images': {}, 'members': [], 'metadef_namespace_resource_types': [], 'metadef_namespaces': [], 'metadef_objects': [], 'metadef_properties': [], 'metadef_resource_types': [], 'metadef_tags': [], 'node_reference': {}, 'tags': {}, 'locations': [], 'tasks': {}, 'task_info': {}, } INDEX = 0 def log_call(func): @functools.wraps(func) def wrapped(*args, **kwargs): LOG.info(_LI('Calling %(funcname)s: args=%(args)s, ' 'kwargs=%(kwargs)s'), {"funcname": func.__name__, "args": args, "kwargs": kwargs}) output = func(*args, **kwargs) LOG.info(_LI('Returning %(funcname)s: %(output)s'), {"funcname": func.__name__, "output": output}) return output return wrapped def configure(): if CONF.workers not in [0, 1]: msg = _('CONF.workers should be set to 0 or 1 when using the ' 'db.simple.api backend. Fore more info, see ' 'https://bugs.launchpad.net/glance/+bug/1619508') LOG.critical(msg) raise SystemExit(msg) def reset(): global DATA DATA = { 'cached_images': {}, 'images': {}, 'members': [], 'metadef_namespace_resource_types': [], 'metadef_namespaces': [], 'metadef_objects': [], 'metadef_properties': [], 'metadef_resource_types': [], 'metadef_tags': [], 'node_reference': {}, 'tags': {}, 'locations': [], 'tasks': {}, 'task_info': {}, } def clear_db_env(*args, **kwargs): """ Setup global environment configuration variables. We have no connection-oriented environment variables, so this is a NOOP. 
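# NOTE: A minimal sketch (hypothetical logger name, not part of this
# module) of the @log_call decorator defined above: it logs the call
# arguments and the return value, while functools.wraps preserves the
# wrapped function's metadata.
import functools
import logging

_LOG_EXAMPLE = logging.getLogger(__name__)

def log_call_example(func):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        _LOG_EXAMPLE.info('Calling %s: args=%s, kwargs=%s',
                          func.__name__, args, kwargs)
        output = func(*args, **kwargs)
        _LOG_EXAMPLE.info('Returning %s: %s', func.__name__, output)
        return output
    return wrapped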
""" pass def _get_session(): return DATA @utils.no_4byte_params def _image_location_format(image_id, value, meta_data, status, deleted=False): dt = timeutils.utcnow() return { 'id': str(uuid.uuid4()), 'image_id': image_id, 'created_at': dt, 'updated_at': dt, 'deleted_at': dt if deleted else None, 'deleted': deleted, 'url': value, 'metadata': meta_data, 'status': status, } def _image_property_format(image_id, name, value): return { 'image_id': image_id, 'name': name, 'value': value, 'deleted': False, 'deleted_at': None, } def _image_member_format(image_id, tenant_id, can_share, status='pending', deleted=False): dt = timeutils.utcnow() return { 'id': str(uuid.uuid4()), 'image_id': image_id, 'member': tenant_id, 'can_share': can_share, 'status': status, 'created_at': dt, 'updated_at': dt, 'deleted': deleted, } def _pop_task_info_values(values): task_info_values = {} for k, v in list(values.items()): if k in ['input', 'result', 'message']: values.pop(k) task_info_values[k] = v return task_info_values def _format_task_from_db(task_ref, task_info_ref): task = copy.deepcopy(task_ref) if task_info_ref: task_info = copy.deepcopy(task_info_ref) task_info_values = _pop_task_info_values(task_info) task.update(task_info_values) return task def _task_format(task_id, **values): dt = timeutils.utcnow() task = { 'id': task_id, 'type': 'import', 'status': values.get('status', 'pending'), 'owner': None, 'expires_at': None, 'created_at': dt, 'updated_at': dt, 'deleted_at': None, 'deleted': False, 'image_id': values.get('image_id', None), 'request_id': values.get('request_id', None), 'user_id': values.get('user_id', None), } task.update(values) return task def _task_info_format(task_id, **values): task_info = { 'task_id': task_id, 'input': None, 'result': None, 'message': None, } task_info.update(values) return task_info @utils.no_4byte_params def _image_update(image, values, properties): # NOTE(bcwaldon): store properties as a list to match sqlalchemy driver properties = [{'name': k, 'value': v, 'image_id': image['id'], 'deleted': False} for k, v in properties.items()] if 'properties' not in image.keys(): image['properties'] = [] image['properties'].extend(properties) values = db_utils.ensure_image_dict_v2_compliant(values) image.update(values) return image def _image_format(image_id, **values): dt = timeutils.utcnow() image = { 'id': image_id, 'name': None, 'owner': None, 'locations': [], 'status': 'queued', 'protected': False, 'visibility': 'shared', 'container_format': None, 'disk_format': None, 'min_ram': 0, 'min_disk': 0, 'size': None, 'virtual_size': None, 'checksum': None, 'os_hash_algo': None, 'os_hash_value': None, 'tags': [], 'created_at': dt, 'updated_at': dt, 'deleted_at': None, 'deleted': False, 'os_hidden': False } locations = values.pop('locations', None) if locations is not None: image['locations'] = [] for location in locations: location_ref = _image_location_format(image_id, location['url'], location['metadata'], location['status']) image['locations'].append(location_ref) DATA['locations'].append(location_ref) return _image_update(image, values, values.pop('properties', {})) def _filter_images(images, filters, context, status='accepted', is_public=None, admin_as_user=False): filtered_images = [] if 'properties' in filters: prop_filter = filters.pop('properties') filters.update(prop_filter) if status == 'all': status = None visibility = filters.pop('visibility', None) os_hidden = filters.pop('os_hidden', False) for image in images: member = image_member_find(context, image_id=image['id'], 
member=context.owner, status=status) is_member = len(member) > 0 has_ownership = context.owner and image['owner'] == context.owner image_is_public = image['visibility'] == 'public' image_is_community = image['visibility'] == 'community' image_is_shared = image['visibility'] == 'shared' image_is_hidden = image['os_hidden'] == True acts_as_admin = context.is_admin and not admin_as_user can_see = (image_is_public or image_is_community or has_ownership or (is_member and image_is_shared) or acts_as_admin) if not can_see: continue if visibility: if visibility == 'public': if not image_is_public: continue elif visibility == 'private': if not (image['visibility'] == 'private'): continue if not (has_ownership or acts_as_admin): continue elif visibility == 'shared': if not image_is_shared: continue elif visibility == 'community': if not image_is_community: continue else: if (not has_ownership) and image_is_community: continue if is_public is not None: if not image_is_public == is_public: continue if os_hidden: if image_is_hidden: continue to_add = True for k, value in filters.items(): key = k if k.endswith('_min') or k.endswith('_max'): key = key[0:-4] try: value = int(value) except ValueError: msg = _("Unable to filter on a range " "with a non-numeric value.") raise exception.InvalidFilterRangeValue(msg) if k.endswith('_min'): to_add = image.get(key) >= value elif k.endswith('_max'): to_add = image.get(key) <= value elif k in ['created_at', 'updated_at']: attr_value = image.get(key) operator, isotime = utils.split_filter_op(value) parsed_time = timeutils.parse_isotime(isotime) threshold = timeutils.normalize_time(parsed_time) to_add = utils.evaluate_filter_op(attr_value, operator, threshold) elif k in ['name', 'id', 'status', 'container_format', 'disk_format']: attr_value = image.get(key) operator, list_value = utils.split_filter_op(value) if operator == 'in': threshold = utils.split_filter_value_for_quotes(list_value) to_add = attr_value in threshold elif operator == 'eq': to_add = (attr_value == list_value) else: msg = (_("Unable to filter by unknown operator '%s'.") % operator) raise exception.InvalidFilterOperatorValue(msg) elif k != 'is_public' and image.get(k) is not None: to_add = image.get(key) == value elif k == 'tags': filter_tags = value image_tags = image_tag_get_all(context, image['id']) for tag in filter_tags: if tag not in image_tags: to_add = False break else: to_add = False for p in image['properties']: properties = {p['name']: p['value'], 'deleted': p['deleted']} to_add |= (properties.get(key) == value and properties.get('deleted') is False) if not to_add: break if to_add: filtered_images.append(image) return filtered_images def _do_pagination(context, images, marker, limit, show_deleted, status='accepted'): start = 0 end = -1 if marker is None: start = 0 else: # Check that the image is accessible _image_get(context, marker, force_show_deleted=show_deleted, status=status) for i, image in enumerate(images): if image['id'] == marker: start = i + 1 break else: raise exception.ImageNotFound() end = start + limit if limit is not None else None return images[start:end] def _sort_images(images, sort_key, sort_dir): sort_key = ['created_at'] if not sort_key else sort_key default_sort_dir = 'desc' if not sort_dir: sort_dir = [default_sort_dir] * len(sort_key) elif len(sort_dir) == 1: default_sort_dir = sort_dir[0] sort_dir *= len(sort_key) for key in ['created_at', 'id']: if key not in sort_key: sort_key.append(key) sort_dir.append(default_sort_dir) for key in sort_key: if images and not 
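# NOTE: A minimal sketch (hypothetical, not part of this module) of the
# '_min'/'_max' range-filter convention handled in _filter_images() above:
# the suffix is stripped to find the attribute and the value is compared
# numerically, so {'size_min': '100'} keeps images with size >= 100.
def matches(image, key, value):
    if key.endswith('_min'):
        return image.get(key[:-4]) >= int(value)
    if key.endswith('_max'):
        return image.get(key[:-4]) <= int(value)
    return image.get(key) == value

assert matches({'size': 150}, 'size_min', '100')
assert not matches({'size': 150}, 'size_max', '100')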
(key in images[0]): raise exception.InvalidSortKey() if any(dir for dir in sort_dir if dir not in ['asc', 'desc']): raise exception.InvalidSortDir() if len(sort_key) != len(sort_dir): raise exception.Invalid(message='Number of sort dirs does not match ' 'the number of sort keys') for key, dir in reversed(list(zip(sort_key, sort_dir))): reverse = dir == 'desc' images.sort(key=lambda x: x[key] or '', reverse=reverse) return images def image_set_property_atomic(image_id, name, value): try: image = DATA['images'][image_id] except KeyError: LOG.warning(_LW('Could not find image %s'), image_id) raise exception.ImageNotFound() prop = _image_property_format(image_id, name, value) image['properties'].append(prop) def image_delete_property_atomic(image_id, name, value): try: image = DATA['images'][image_id] except KeyError: LOG.warning(_LW('Could not find image %s'), image_id) raise exception.ImageNotFound() for i, prop in enumerate(image['properties']): if prop['name'] == name and prop['value'] == value: del image['properties'][i] return raise exception.NotFound() def _image_get(context, image_id, force_show_deleted=False, status=None): try: image = DATA['images'][image_id] except KeyError: LOG.warning(_LW('Could not find image %s'), image_id) raise exception.ImageNotFound() if image['deleted'] and not (force_show_deleted or context.can_see_deleted): LOG.warning(_LW('Unable to get deleted image')) raise exception.ImageNotFound() if not is_image_visible(context, image): LOG.warning(_LW('Unable to get unowned image')) raise exception.Forbidden("Image not visible to you") return image @log_call def image_get(context, image_id, session=None, force_show_deleted=False, v1_mode=False): image = copy.deepcopy(_image_get(context, image_id, force_show_deleted)) image = _normalize_locations(context, image, force_show_deleted=force_show_deleted) if v1_mode: image = db_utils.mutate_image_dict_to_v1(image) return image @log_call def tasks_get_by_image(context, image_id): db_tasks = DATA['tasks'] tasks = [] for task in db_tasks: if db_tasks[task]['image_id'] == image_id: if _is_task_visible(context, db_tasks[task]): tasks.append(db_tasks[task]) return tasks @log_call def image_get_all(context, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False, v1_mode=False): filters = filters or {} images = DATA['images'].values() images = _filter_images(images, filters, context, member_status, is_public, admin_as_user) images = _sort_images(images, sort_key, sort_dir) images = _do_pagination(context, images, marker, limit, filters.get('deleted')) force_show_deleted = True if filters.get('deleted') else False res = [] for image in images: img = _normalize_locations(context, copy.deepcopy(image), force_show_deleted=force_show_deleted) if return_tag: img['tags'] = image_tag_get_all(context, img['id']) if v1_mode: img = db_utils.mutate_image_dict_to_v1(img) res.append(img) return res def image_restore(context, image_id): """Restore the pending-delete image to active.""" image = _image_get(context, image_id) if image['status'] != 'pending_delete': msg = (_('cannot restore the image from %s to active (wanted ' 'from_state=pending_delete)') % image['status']) raise exception.Conflict(msg) values = {'status': 'active', 'deleted': 0} image_update(context, image_id, values) @log_call def image_property_create(context, values): image = _image_get(context, values['image_id']) prop = _image_property_format(values['image_id'], values['name'], 
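# NOTE: A minimal sketch (not part of this module) of the multi-key
# ordering in _sort_images() above: because list.sort() is stable, sorting
# by the keys in reverse order leaves the first key most significant.
rows = [{'a': 1, 'b': 2}, {'a': 1, 'b': 1}, {'a': 0, 'b': 3}]
for key, direction in reversed([('a', 'asc'), ('b', 'desc')]):
    rows.sort(key=lambda x: x[key], reverse=(direction == 'desc'))
assert rows == [{'a': 0, 'b': 3}, {'a': 1, 'b': 2}, {'a': 1, 'b': 1}]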
values['value']) image['properties'].append(prop) return prop @log_call def image_property_delete(context, prop_ref, image_ref): prop = None for p in DATA['images'][image_ref]['properties']: if p['name'] == prop_ref: prop = p if not prop: raise exception.NotFound() prop['deleted_at'] = timeutils.utcnow() prop['deleted'] = True return prop @log_call def image_member_find(context, image_id=None, member=None, status=None, include_deleted=False): filters = [] images = DATA['images'] members = DATA['members'] def is_visible(member): return (member['member'] == context.owner or images[member['image_id']]['owner'] == context.owner) if not context.is_admin: filters.append(is_visible) if image_id is not None: filters.append(lambda m: m['image_id'] == image_id) if member is not None: filters.append(lambda m: m['member'] == member) if status is not None: filters.append(lambda m: m['status'] == status) for f in filters: members = filter(f, members) return [copy.deepcopy(m) for m in members] @log_call def image_member_count(context, image_id): """Return the number of image members for this image :param image_id: identifier of image entity """ if not image_id: msg = _("Image id is required.") raise exception.Invalid(msg) members = DATA['members'] return len([x for x in members if x['image_id'] == image_id]) @log_call @utils.no_4byte_params def image_member_create(context, values): member = _image_member_format(values['image_id'], values['member'], values.get('can_share', False), values.get('status', 'pending'), values.get('deleted', False)) global DATA DATA['members'].append(member) return copy.deepcopy(member) @log_call def image_member_update(context, member_id, values): global DATA for member in DATA['members']: if member['id'] == member_id: member.update(values) member['updated_at'] = timeutils.utcnow() return copy.deepcopy(member) else: raise exception.NotFound() @log_call def image_member_delete(context, member_id): global DATA for i, member in enumerate(DATA['members']): if member['id'] == member_id: del DATA['members'][i] break else: raise exception.NotFound() @log_call @utils.no_4byte_params def image_location_add(context, image_id, location): deleted = location['status'] in ('deleted', 'pending_delete') location_ref = _image_location_format(image_id, value=location['url'], meta_data=location['metadata'], status=location['status'], deleted=deleted) DATA['locations'].append(location_ref) image = DATA['images'][image_id] image.setdefault('locations', []).append(location_ref) @log_call @utils.no_4byte_params def image_location_update(context, image_id, location): loc_id = location.get('id') if loc_id is None: msg = _("The location data has an invalid ID: %d") % loc_id raise exception.Invalid(msg) deleted = location['status'] in ('deleted', 'pending_delete') updated_time = timeutils.utcnow() delete_time = updated_time if deleted else None updated = False for loc in DATA['locations']: if loc['id'] == loc_id and loc['image_id'] == image_id: loc.update({"value": location['url'], "meta_data": location['metadata'], "status": location['status'], "deleted": deleted, "updated_at": updated_time, "deleted_at": delete_time}) updated = True break if not updated: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=loc_id, img=image_id)) LOG.warning(msg) raise exception.NotFound(msg) @log_call def image_location_delete(context, image_id, location_id, status, delete_time=None): if status not in ('deleted', 'pending_delete'): msg = _("The status of deleted image location can only be set to 
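# NOTE: A minimal sketch (hypothetical data, not part of this module) of
# the predicate-chain style image_member_find() above uses: each active
# filter becomes a callable appended to a list, then applied in sequence.
member_rows = [{'member': 'a', 'status': 'pending'},
               {'member': 'b', 'status': 'accepted'}]
predicates = [lambda m: m['status'] == 'accepted']
for pred in predicates:
    member_rows = list(filter(pred, member_rows))
assert member_rows == [{'member': 'b', 'status': 'accepted'}]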
" "'pending_delete' or 'deleted'.") raise exception.Invalid(msg) deleted = False for loc in DATA['locations']: if loc['id'] == location_id and loc['image_id'] == image_id: deleted = True delete_time = delete_time or timeutils.utcnow() loc.update({"deleted": deleted, "status": status, "updated_at": delete_time, "deleted_at": delete_time}) break if not deleted: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=location_id, img=image_id)) LOG.warning(msg) raise exception.NotFound(msg) def _image_locations_set(context, image_id, locations): # NOTE(zhiyan): 1. Remove records from DB for deleted locations used_loc_ids = [loc['id'] for loc in locations if loc.get('id')] image = DATA['images'][image_id] for loc in image['locations']: if loc['id'] not in used_loc_ids and not loc['deleted']: image_location_delete(context, image_id, loc['id'], 'deleted') for i, loc in enumerate(DATA['locations']): if (loc['image_id'] == image_id and loc['id'] not in used_loc_ids and not loc['deleted']): del DATA['locations'][i] # NOTE(zhiyan): 2. Adding or update locations for loc in locations: if loc.get('id') is None: image_location_add(context, image_id, loc) else: image_location_update(context, image_id, loc) def _image_locations_delete_all(context, image_id, delete_time=None): image = DATA['images'][image_id] for loc in image['locations']: if not loc['deleted']: image_location_delete(context, image_id, loc['id'], 'deleted', delete_time=delete_time) for i, loc in enumerate(DATA['locations']): if image_id == loc['image_id'] and loc['deleted'] == False: del DATA['locations'][i] def _normalize_locations(context, image, force_show_deleted=False): """ Generate suitable dictionary list for locations field of image. We don't need to set other data fields of location record which return from image query. 
""" if image['status'] == 'deactivated' and not context.is_admin: # Locations are not returned for a deactivated image for non-admin user image['locations'] = [] return image if force_show_deleted: locations = image['locations'] else: locations = [x for x in image['locations'] if not x['deleted']] image['locations'] = [{'id': loc['id'], 'url': loc['url'], 'metadata': loc['metadata'], 'status': loc['status']} for loc in locations] return image @log_call def image_create(context, image_values, v1_mode=False): global DATA image_id = image_values.get('id', str(uuid.uuid4())) if image_id in DATA['images']: raise exception.Duplicate() if 'status' not in image_values: raise exception.Invalid('status is a required attribute') allowed_keys = set(['id', 'name', 'status', 'min_ram', 'min_disk', 'size', 'virtual_size', 'checksum', 'locations', 'owner', 'protected', 'is_public', 'container_format', 'disk_format', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'properties', 'tags', 'visibility', 'os_hidden', 'os_hash_algo', 'os_hash_value']) incorrect_keys = set(image_values.keys()) - allowed_keys if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) image = _image_format(image_id, **image_values) DATA['images'][image_id] = image DATA['tags'][image_id] = image.pop('tags', []) image = _normalize_locations(context, copy.deepcopy(image)) if v1_mode: image = db_utils.mutate_image_dict_to_v1(image) return image @log_call def image_update(context, image_id, image_values, purge_props=False, from_state=None, v1_mode=False, atomic_props=None): global DATA try: image = DATA['images'][image_id] except KeyError: raise exception.ImageNotFound(image_id) location_data = image_values.pop('locations', None) if location_data is not None: _image_locations_set(context, image_id, location_data) if atomic_props is None: atomic_props = [] # replace values for properties that already exist new_properties = image_values.pop('properties', {}) for prop in image['properties']: if prop['name'] in atomic_props: continue elif prop['name'] in new_properties: prop['value'] = new_properties.pop(prop['name']) elif purge_props: # this matches weirdness in the sqlalchemy api prop['deleted'] = True image['updated_at'] = timeutils.utcnow() _image_update(image, image_values, {k: v for k, v in new_properties.items() if k not in atomic_props}) DATA['images'][image_id] = image image = _normalize_locations(context, copy.deepcopy(image)) if v1_mode: image = db_utils.mutate_image_dict_to_v1(image) return image @log_call def image_destroy(context, image_id): global DATA try: delete_time = timeutils.utcnow() DATA['images'][image_id]['deleted'] = True DATA['images'][image_id]['deleted_at'] = delete_time # NOTE(flaper87): Move the image to one of the deleted statuses # if it hasn't been done yet. 
if (DATA['images'][image_id]['status'] not in ['deleted', 'pending_delete']): DATA['images'][image_id]['status'] = 'deleted' _image_locations_delete_all(context, image_id, delete_time=delete_time) for prop in DATA['images'][image_id]['properties']: image_property_delete(context, prop['name'], image_id) members = image_member_find(context, image_id=image_id) for member in members: image_member_delete(context, member['id']) tags = image_tag_get_all(context, image_id) for tag in tags: image_tag_delete(context, image_id, tag) return _normalize_locations(context, copy.deepcopy(DATA['images'][image_id])) except KeyError: raise exception.ImageNotFound() @log_call def image_tag_get_all(context, image_id): return DATA['tags'].get(image_id, []) @log_call def image_tag_get(context, image_id, value): tags = image_tag_get_all(context, image_id) if value in tags: return value else: raise exception.NotFound() @log_call def image_tag_set_all(context, image_id, values): global DATA DATA['tags'][image_id] = list(values) @log_call @utils.no_4byte_params def image_tag_create(context, image_id, value): global DATA DATA['tags'][image_id].append(value) return value @log_call def image_tag_delete(context, image_id, value): global DATA try: DATA['tags'][image_id].remove(value) except ValueError: raise exception.NotFound() def is_image_visible(context, image, status=None): if status == 'all': status = None return db_utils.is_image_visible(context, image, image_member_find, status) def user_get_storage_usage(context, owner_id, image_id=None, session=None): images = image_get_all(context, filters={'owner': owner_id}) total = 0 for image in images: if image['status'] in ['killed', 'deleted']: continue if image['id'] != image_id: locations = [loc for loc in image['locations'] if loc.get('status') != 'deleted'] total += (image['size'] * len(locations)) return total @log_call def task_create(context, values): """Create a task object""" global DATA task_values = copy.deepcopy(values) task_id = task_values.get('id', str(uuid.uuid4())) required_attributes = ['type', 'status', 'input'] allowed_attributes = ['id', 'type', 'status', 'input', 'result', 'owner', 'message', 'expires_at', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'image_id', 'request_id', 'user_id'] if task_id in DATA['tasks']: raise exception.Duplicate() for key in required_attributes: if key not in task_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(task_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) task_info_values = _pop_task_info_values(task_values) task = _task_format(task_id, **task_values) DATA['tasks'][task_id] = task task_info = _task_info_create(task['id'], task_info_values) return _format_task_from_db(task, task_info) @log_call def task_update(context, task_id, values): """Update a task object""" global DATA task_values = copy.deepcopy(values) task_info_values = _pop_task_info_values(task_values) try: task = DATA['tasks'][task_id] except KeyError: LOG.debug("No task found with ID %s", task_id) raise exception.TaskNotFound(task_id=task_id) task.update(task_values) task['updated_at'] = timeutils.utcnow() DATA['tasks'][task_id] = task task_info = _task_info_update(task['id'], task_info_values) return _format_task_from_db(task, task_info) @log_call def task_get(context, task_id, force_show_deleted=False): task, task_info = _task_get(context, task_id, force_show_deleted) return _format_task_from_db(task, task_info) 
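# NOTE: A minimal sketch (hypothetical keys, not part of this module) of
# the allowed-keys validation used by image_create(), task_create() and
# the metadef creators above: one set difference flags every unexpected
# input key at once.
allowed = {'id', 'name', 'status'}
values = {'name': 'cirros', 'flavor': 'm1.tiny'}
unexpected = set(values) - allowed
assert unexpected == {'flavor'}  # the real code raises exception.Invalid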
def _task_get(context, task_id, force_show_deleted=False): try: task = DATA['tasks'][task_id] except KeyError: msg = _LW('Could not find task %s') % task_id LOG.warning(msg) raise exception.TaskNotFound(task_id=task_id) if task['deleted'] and not (force_show_deleted or context.can_see_deleted): msg = _LW('Unable to get deleted task %s') % task_id LOG.warning(msg) raise exception.TaskNotFound(task_id=task_id) if not _is_task_visible(context, task): LOG.debug("Forbidding request, task %s is not visible", task_id) msg = _("Forbidding request, task %s is not visible") % task_id raise exception.Forbidden(msg) task_info = _task_info_get(task_id) return task, task_info @log_call def task_delete(context, task_id): global DATA try: DATA['tasks'][task_id]['deleted'] = True DATA['tasks'][task_id]['deleted_at'] = timeutils.utcnow() DATA['tasks'][task_id]['updated_at'] = timeutils.utcnow() return copy.deepcopy(DATA['tasks'][task_id]) except KeyError: LOG.debug("No task found with ID %s", task_id) raise exception.TaskNotFound(task_id=task_id) def _task_soft_delete(context): """Scrub task entities which are expired """ global DATA now = timeutils.utcnow() tasks = DATA['tasks'].values() for task in tasks: if (task['owner'] == context.owner and task['deleted'] == False and task['expires_at'] <= now): task['deleted'] = True task['deleted_at'] = timeutils.utcnow() @log_call def task_get_all(context, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'): """ Get all tasks that match zero or more filters. :param filters: dict of filter keys and values. :param marker: task id after which to start page :param limit: maximum number of tasks to return :param sort_key: task attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :returns: tasks set """ _task_soft_delete(context) filters = filters or {} tasks = DATA['tasks'].values() tasks = _filter_tasks(tasks, filters, context) tasks = _sort_tasks(tasks, sort_key, sort_dir) tasks = _paginate_tasks(context, tasks, marker, limit, filters.get('deleted')) filtered_tasks = [] for task in tasks: filtered_tasks.append(_format_task_from_db(task, task_info_ref=None)) return filtered_tasks def _is_task_visible(context, task): """Return True if the task is visible in this context.""" # Is admin == task visible if context.is_admin: return True # No owner == task visible if task['owner'] is None: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == task['owner']: return True return False def _filter_tasks(tasks, filters, context, admin_as_user=False): filtered_tasks = [] for task in tasks: has_ownership = context.owner and task['owner'] == context.owner can_see = (has_ownership or (context.is_admin and not admin_as_user)) if not can_see: continue add = True for k, value in filters.items(): add = task[k] == value and task['deleted'] is False if not add: break if add: filtered_tasks.append(task) return filtered_tasks def _sort_tasks(tasks, sort_key, sort_dir): reverse = False if tasks and not (sort_key in tasks[0]): raise exception.InvalidSortKey() def keyfn(x): return (x[sort_key] if x[sort_key] is not None else '', x['created_at'], x['id']) reverse = sort_dir == 'desc' tasks.sort(key=keyfn, reverse=reverse) return tasks def _paginate_tasks(context, tasks, marker, limit, show_deleted): start = 0 end = -1 if marker is None: start = 0 else: # Check that the task is accessible _task_get(context, marker, force_show_deleted=show_deleted) 
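# NOTE: A minimal sketch (not part of this module) of the rules
# _is_task_visible() above encodes: admins see every task, ownerless
# tasks are visible to all, and otherwise the requester must be the owner.
def visible(is_admin, requester, task_owner):
    return is_admin or task_owner is None or requester == task_owner

assert visible(True, 'alice', 'bob')        # admin wins
assert visible(False, 'alice', None)        # no owner
assert not visible(False, 'alice', 'bob')   # someone else's task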
for i, task in enumerate(tasks): if task['id'] == marker: start = i + 1 break else: if task: raise exception.TaskNotFound(task_id=task['id']) else: msg = _("Task does not exist") raise exception.NotFound(message=msg) end = start + limit if limit is not None else None return tasks[start:end] def _task_info_create(task_id, values): """Create a Task Info for Task with given task ID""" global DATA task_info = _task_info_format(task_id, **values) DATA['task_info'][task_id] = task_info return task_info def _task_info_update(task_id, values): """Update Task Info for Task with given task ID and updated values""" global DATA try: task_info = DATA['task_info'][task_id] except KeyError: LOG.debug("No task info found with task id %s", task_id) raise exception.TaskNotFound(task_id=task_id) task_info.update(values) DATA['task_info'][task_id] = task_info return task_info def _task_info_get(task_id): """Get Task Info for Task with given task ID""" global DATA try: task_info = DATA['task_info'][task_id] except KeyError: msg = _LW('Could not find task info %s') % task_id LOG.warning(msg) raise exception.TaskNotFound(task_id=task_id) return task_info def _metadef_delete_namespace_content(get_func, key, context, namespace_name): global DATA metadefs = get_func(context, namespace_name) data = DATA[key] for metadef in metadefs: data.remove(metadef) return metadefs @log_call @utils.no_4byte_params def metadef_namespace_create(context, values): """Create a namespace object""" global DATA namespace_values = copy.deepcopy(values) namespace_name = namespace_values.get('namespace') required_attributes = ['namespace', 'owner'] allowed_attributes = ['namespace', 'owner', 'display_name', 'description', 'visibility', 'protected'] for namespace in DATA['metadef_namespaces']: if namespace['namespace'] == namespace_name: LOG.debug("Can not create the metadata definition namespace. " "Namespace=%s already exists.", namespace_name) raise exception.MetadefDuplicateNamespace( namespace_name=namespace_name) for key in required_attributes: if key not in namespace_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(namespace_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) namespace = _format_namespace(namespace_values) DATA['metadef_namespaces'].append(namespace) return namespace @log_call @utils.no_4byte_params def metadef_namespace_update(context, namespace_id, values): """Update a namespace object""" global DATA namespace_values = copy.deepcopy(values) namespace = metadef_namespace_get_by_id(context, namespace_id) if namespace['namespace'] != values['namespace']: for db_namespace in DATA['metadef_namespaces']: if db_namespace['namespace'] == values['namespace']: LOG.debug("Invalid update. It would result in a duplicate " "metadata definition namespace with the same " "name of %s", values['namespace']) emsg = (_("Invalid update. 
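# NOTE: A minimal sketch (hypothetical, not part of this module) of the
# marker/limit pagination shared by _paginate_tasks() and _do_pagination()
# above: locate the marker row, then slice off the next 'limit' rows.
def paginate(rows, marker=None, limit=None):
    start = 0
    if marker is not None:
        start = next(i + 1 for i, r in enumerate(rows) if r['id'] == marker)
    end = start + limit if limit is not None else None
    return rows[start:end]

assert paginate([{'id': 1}, {'id': 2}, {'id': 3}], marker=1, limit=1) == \
    [{'id': 2}]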
It would result in a duplicate" " metadata definition namespace with the same" " name of %s") % values['namespace']) raise exception.MetadefDuplicateNamespace(emsg) DATA['metadef_namespaces'].remove(namespace) namespace.update(namespace_values) namespace['updated_at'] = timeutils.utcnow() DATA['metadef_namespaces'].append(namespace) return namespace @log_call def metadef_namespace_get_by_id(context, namespace_id): """Get a namespace object""" try: namespace = next(namespace for namespace in DATA['metadef_namespaces'] if namespace['id'] == namespace_id) except StopIteration: msg = (_("Metadata definition namespace not found for id=%s") % namespace_id) LOG.warning(msg) raise exception.MetadefNamespaceNotFound(msg) if not _is_namespace_visible(context, namespace): LOG.debug("Forbidding request, metadata definition namespace=%s " "is not visible.", namespace.namespace) emsg = _("Forbidding request, metadata definition namespace=%s " "is not visible.") % namespace.namespace raise exception.MetadefForbidden(emsg) return namespace @log_call def metadef_namespace_get(context, namespace_name): """Get a namespace object""" try: namespace = next(namespace for namespace in DATA['metadef_namespaces'] if namespace['namespace'] == namespace_name) except StopIteration: LOG.debug("No namespace found with name %s", namespace_name) raise exception.MetadefNamespaceNotFound( namespace_name=namespace_name) _check_namespace_visibility(context, namespace, namespace_name) return namespace @log_call def metadef_namespace_get_all(context, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): """Get a namespaces list""" resource_types = filters.get('resource_types', []) if filters else [] visibility = filters.get('visibility') if filters else None namespaces = [] for namespace in DATA['metadef_namespaces']: if not _is_namespace_visible(context, namespace): continue if visibility and namespace['visibility'] != visibility: continue if resource_types: for association in DATA['metadef_namespace_resource_types']: if association['namespace_id'] == namespace['id']: if association['name'] in resource_types: break else: continue namespaces.append(namespace) return namespaces @log_call def metadef_namespace_delete(context, namespace_name): """Delete a namespace object""" global DATA namespace = metadef_namespace_get(context, namespace_name) DATA['metadef_namespaces'].remove(namespace) return namespace @log_call def metadef_namespace_delete_content(context, namespace_name): """Delete a namespace content""" global DATA namespace = metadef_namespace_get(context, namespace_name) namespace_id = namespace['id'] objects = [] for object in DATA['metadef_objects']: if object['namespace_id'] != namespace_id: objects.append(object) DATA['metadef_objects'] = objects properties = [] for property in DATA['metadef_objects']: if property['namespace_id'] != namespace_id: properties.append(object) DATA['metadef_objects'] = properties return namespace @log_call def metadef_object_get(context, namespace_name, object_name): """Get a metadef object""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for object in DATA['metadef_objects']: if (object['namespace_id'] == namespace['id'] and object['name'] == object_name): return object else: LOG.debug("The metadata definition object with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': object_name, 'namespace_name': namespace_name}) raise 
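# NOTE: A minimal sketch (hypothetical, not part of this module) of the
# for/else lookup idiom the metadef getters above rely on: the else branch
# runs only when the loop completes without breaking out, i.e. when no
# record matched, so it is the natural place for the not-found raise.
def find(rows, name):
    for row in rows:
        if row['name'] == name:
            return row
    else:
        raise KeyError(name)  # the real code raises a *NotFound exception

assert find([{'name': 'os'}], 'os') == {'name': 'os'}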
exception.MetadefObjectNotFound(namespace_name=namespace_name, object_name=object_name) @log_call def metadef_object_get_by_id(context, namespace_name, object_id): """Get a metadef object""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for object in DATA['metadef_objects']: if (object['namespace_id'] == namespace['id'] and object['id'] == object_id): return object else: msg = (_("Metadata definition object not found for id=%s") % object_id) LOG.warning(msg) raise exception.MetadefObjectNotFound(msg) @log_call def metadef_object_get_all(context, namespace_name): """Get a metadef objects list""" namespace = metadef_namespace_get(context, namespace_name) objects = [] _check_namespace_visibility(context, namespace, namespace_name) for object in DATA['metadef_objects']: if object['namespace_id'] == namespace['id']: objects.append(object) return objects @log_call @utils.no_4byte_params def metadef_object_create(context, namespace_name, values): """Create a metadef object""" global DATA object_values = copy.deepcopy(values) object_name = object_values['name'] required_attributes = ['name'] allowed_attributes = ['name', 'description', 'json_schema', 'required'] namespace = metadef_namespace_get(context, namespace_name) for object in DATA['metadef_objects']: if (object['name'] == object_name and object['namespace_id'] == namespace['id']): LOG.debug("A metadata definition object with name=%(name)s " "in namespace=%(namespace_name)s already exists.", {'name': object_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateObject( object_name=object_name, namespace_name=namespace_name) for key in required_attributes: if key not in object_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(object_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) object_values['namespace_id'] = namespace['id'] _check_namespace_visibility(context, namespace, namespace_name) object = _format_object(object_values) DATA['metadef_objects'].append(object) return object @log_call @utils.no_4byte_params def metadef_object_update(context, namespace_name, object_id, values): """Update a metadef object""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) object = metadef_object_get_by_id(context, namespace_name, object_id) if object['name'] != values['name']: for db_object in DATA['metadef_objects']: if (db_object['name'] == values['name'] and db_object['namespace_id'] == namespace['id']): LOG.debug("Invalid update. It would result in a duplicate " "metadata definition object with same name=%(name)s " "in namespace=%(namespace_name)s.", {'name': object['name'], 'namespace_name': namespace_name}) emsg = (_("Invalid update. 
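# NOTE: A minimal sketch (hypothetical data, not part of this module) of
# the duplicate check in metadef_object_create() above: uniqueness is
# scoped to the (name, namespace_id) pair, so the same object name may
# exist in different namespaces.
existing = [{'name': 'obj1', 'namespace_id': 'ns-A'}]
candidate = {'name': 'obj1', 'namespace_id': 'ns-B'}
clash = any(o['name'] == candidate['name'] and
            o['namespace_id'] == candidate['namespace_id'] for o in existing)
assert not clash  # same name in another namespace is allowed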
It would result in a duplicate" " metadata definition object with the same" " name=%(name)s " " in namespace=%(namespace_name)s.") % {'name': object['name'], 'namespace_name': namespace_name}) raise exception.MetadefDuplicateObject(emsg) DATA['metadef_objects'].remove(object) object.update(values) object['updated_at'] = timeutils.utcnow() DATA['metadef_objects'].append(object) return object @log_call def metadef_object_delete(context, namespace_name, object_name): """Delete a metadef object""" global DATA object = metadef_object_get(context, namespace_name, object_name) DATA['metadef_objects'].remove(object) return object def metadef_object_delete_namespace_content(context, namespace_name): """Delete an object or raise if namespace or object doesn't exist.""" return _metadef_delete_namespace_content( metadef_object_get_all, 'metadef_objects', context, namespace_name) @log_call def metadef_object_count(context, namespace_name): """Get metadef object count in a namespace""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) count = 0 for object in DATA['metadef_objects']: if object['namespace_id'] == namespace['id']: count = count + 1 return count @log_call def metadef_property_count(context, namespace_name): """Get properties count in a namespace""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) count = 0 for property in DATA['metadef_properties']: if property['namespace_id'] == namespace['id']: count = count + 1 return count @log_call @utils.no_4byte_params def metadef_property_create(context, namespace_name, values): """Create a metadef property""" global DATA property_values = copy.deepcopy(values) property_name = property_values['name'] required_attributes = ['name'] allowed_attributes = ['name', 'description', 'json_schema', 'required'] namespace = metadef_namespace_get(context, namespace_name) for property in DATA['metadef_properties']: if (property['name'] == property_name and property['namespace_id'] == namespace['id']): LOG.debug("Can not create metadata definition property. A property" " with name=%(name)s already exists in" " namespace=%(namespace_name)s.", {'name': property_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateProperty( property_name=property_name, namespace_name=namespace_name) for key in required_attributes: if key not in property_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(property_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) property_values['namespace_id'] = namespace['id'] _check_namespace_visibility(context, namespace, namespace_name) property = _format_property(property_values) DATA['metadef_properties'].append(property) return property @log_call @utils.no_4byte_params def metadef_property_update(context, namespace_name, property_id, values): """Update a metadef property""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) property = metadef_property_get_by_id(context, namespace_name, property_id) if property['name'] != values['name']: for db_property in DATA['metadef_properties']: if (db_property['name'] == values['name'] and db_property['namespace_id'] == namespace['id']): LOG.debug("Invalid update. 
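# NOTE: A minimal sketch (hypothetical data, not part of this module) of
# the remove/mutate/re-append update pattern metadef_object_update() and
# its siblings above apply to the in-memory lists:
store = [{'id': 1, 'name': 'old'}]
record = next(r for r in store if r['id'] == 1)
store.remove(record)
record.update({'name': 'new', 'updated_at': None})  # real code stamps utcnow
store.append(record)
assert store == [{'id': 1, 'name': 'new', 'updated_at': None}]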
It would result in a duplicate" " metadata definition property with the same" " name=%(name)s" " in namespace=%(namespace_name)s.", {'name': property['name'], 'namespace_name': namespace_name}) emsg = (_("Invalid update. It would result in a duplicate" " metadata definition property with the same" " name=%(name)s" " in namespace=%(namespace_name)s.") % {'name': property['name'], 'namespace_name': namespace_name}) raise exception.MetadefDuplicateProperty(emsg) DATA['metadef_properties'].remove(property) property.update(values) property['updated_at'] = timeutils.utcnow() DATA['metadef_properties'].append(property) return property @log_call def metadef_property_get_all(context, namespace_name): """Get a metadef properties list""" namespace = metadef_namespace_get(context, namespace_name) properties = [] _check_namespace_visibility(context, namespace, namespace_name) for property in DATA['metadef_properties']: if property['namespace_id'] == namespace['id']: properties.append(property) return properties @log_call def metadef_property_get_by_id(context, namespace_name, property_id): """Get a metadef property""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for property in DATA['metadef_properties']: if (property['namespace_id'] == namespace['id'] and property['id'] == property_id): return property else: msg = (_("Metadata definition property not found for id=%s") % property_id) LOG.warning(msg) raise exception.MetadefPropertyNotFound(msg) @log_call def metadef_property_get(context, namespace_name, property_name): """Get a metadef property""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for property in DATA['metadef_properties']: if (property['namespace_id'] == namespace['id'] and property['name'] == property_name): return property else: LOG.debug("No property found with name=%(name)s in" " namespace=%(namespace_name)s ", {'name': property_name, 'namespace_name': namespace_name}) raise exception.MetadefPropertyNotFound(namespace_name=namespace_name, property_name=property_name) @log_call def metadef_property_delete(context, namespace_name, property_name): """Delete a metadef property""" global DATA property = metadef_property_get(context, namespace_name, property_name) DATA['metadef_properties'].remove(property) return property def metadef_property_delete_namespace_content(context, namespace_name): """Delete a property or raise if it or namespace doesn't exist.""" return _metadef_delete_namespace_content( metadef_property_get_all, 'metadef_properties', context, namespace_name) @log_call def metadef_resource_type_create(context, values): """Create a metadef resource type""" global DATA resource_type_values = copy.deepcopy(values) resource_type_name = resource_type_values['name'] allowed_attrubites = ['name', 'protected'] for resource_type in DATA['metadef_resource_types']: if resource_type['name'] == resource_type_name: raise exception.Duplicate() incorrect_keys = set(resource_type_values.keys()) - set(allowed_attrubites) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) resource_type = _format_resource_type(resource_type_values) DATA['metadef_resource_types'].append(resource_type) return resource_type @log_call def metadef_resource_type_get_all(context): """List all resource types""" return DATA['metadef_resource_types'] @log_call def metadef_resource_type_get(context, resource_type_name): """Get a 
resource type""" try: resource_type = next(resource_type for resource_type in DATA['metadef_resource_types'] if resource_type['name'] == resource_type_name) except StopIteration: LOG.debug("No resource type found with name %s", resource_type_name) raise exception.MetadefResourceTypeNotFound( resource_type_name=resource_type_name) return resource_type @log_call def metadef_resource_type_association_create(context, namespace_name, values): global DATA association_values = copy.deepcopy(values) namespace = metadef_namespace_get(context, namespace_name) resource_type_name = association_values['name'] resource_type = metadef_resource_type_get(context, resource_type_name) required_attributes = ['name', 'properties_target', 'prefix'] allowed_attributes = copy.deepcopy(required_attributes) for association in DATA['metadef_namespace_resource_types']: if (association['namespace_id'] == namespace['id'] and association['resource_type'] == resource_type['id']): LOG.debug("The metadata definition resource-type association of" " resource_type=%(resource_type_name)s to" " namespace=%(namespace_name)s, already exists.", {'resource_type_name': resource_type_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateResourceTypeAssociation( resource_type_name=resource_type_name, namespace_name=namespace_name) for key in required_attributes: if key not in association_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(association_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) association = _format_association(namespace, resource_type, association_values) DATA['metadef_namespace_resource_types'].append(association) return association @log_call def metadef_resource_type_association_get(context, namespace_name, resource_type_name): namespace = metadef_namespace_get(context, namespace_name) resource_type = metadef_resource_type_get(context, resource_type_name) for association in DATA['metadef_namespace_resource_types']: if (association['namespace_id'] == namespace['id'] and association['resource_type'] == resource_type['id']): return association else: LOG.debug("No resource type association found associated with " "namespace %s and resource type %s", namespace_name, resource_type_name) raise exception.MetadefResourceTypeAssociationNotFound( resource_type_name=resource_type_name, namespace_name=namespace_name) @log_call def metadef_resource_type_association_get_all_by_namespace(context, namespace_name): namespace = metadef_namespace_get(context, namespace_name) namespace_resource_types = [] for resource_type in DATA['metadef_namespace_resource_types']: if resource_type['namespace_id'] == namespace['id']: namespace_resource_types.append(resource_type) return namespace_resource_types @log_call def metadef_resource_type_association_delete(context, namespace_name, resource_type_name): global DATA resource_type = metadef_resource_type_association_get(context, namespace_name, resource_type_name) DATA['metadef_namespace_resource_types'].remove(resource_type) return resource_type @log_call def metadef_tag_get(context, namespace_name, name): """Get a metadef tag""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id'] and tag['name'] == name: return tag else: LOG.debug("The metadata definition tag with name=%(name)s" " was not found in 
namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exception.MetadefTagNotFound(name=name, namespace_name=namespace_name) @log_call def metadef_tag_get_by_id(context, namespace_name, id): """Get a metadef tag""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id'] and tag['id'] == id: return tag else: msg = (_("Metadata definition tag not found for id=%s") % id) LOG.warning(msg) raise exception.MetadefTagNotFound(msg) @log_call def metadef_tag_get_all(context, namespace_name, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir=None, session=None): """Get a metadef tags list""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) tags = [] for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id']: tags.append(tag) return tags @log_call @utils.no_4byte_params def metadef_tag_create(context, namespace_name, values): """Create a metadef tag""" global DATA tag_values = copy.deepcopy(values) tag_name = tag_values['name'] required_attributes = ['name'] allowed_attributes = ['name'] namespace = metadef_namespace_get(context, namespace_name) for tag in DATA['metadef_tags']: if tag['name'] == tag_name and tag['namespace_id'] == namespace['id']: LOG.debug("A metadata definition tag with name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': tag_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateTag( name=tag_name, namespace_name=namespace_name) for key in required_attributes: if key not in tag_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) tag_values['namespace_id'] = namespace['id'] _check_namespace_visibility(context, namespace, namespace_name) tag = _format_tag(tag_values) DATA['metadef_tags'].append(tag) return tag @log_call def metadef_tag_create_tags(context, namespace_name, tag_list, can_append=False): """Create a metadef tag""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) required_attributes = ['name'] allowed_attributes = ['name'] data_tag_list = [] tag_name_list = [] if can_append: # NOTE(mrjoshi): We need to fetch existing tags here for duplicate # check while adding new one tag_name_list = [tag['name'] for tag in metadef_tag_get_all(context, namespace_name)] for tag_value in tag_list: tag_values = copy.deepcopy(tag_value) tag_name = tag_values['name'] for key in required_attributes: if key not in tag_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) if tag_name in tag_name_list: LOG.debug("A metadata definition tag with name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': tag_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateTag( name=tag_name, namespace_name=namespace_name) else: tag_name_list.append(tag_name) tag_values['namespace_id'] = namespace['id'] data_tag_list.append(_format_tag(tag_values)) if not can_append: DATA['metadef_tags'] = [] for tag in data_tag_list: 
DATA['metadef_tags'].append(tag)

    return data_tag_list


@log_call
@utils.no_4byte_params
def metadef_tag_update(context, namespace_name, id, values):
    """Update a metadef tag"""
    global DATA
    namespace = metadef_namespace_get(context, namespace_name)

    _check_namespace_visibility(context, namespace, namespace_name)

    tag = metadef_tag_get_by_id(context, namespace_name, id)
    if tag['name'] != values['name']:
        for db_tag in DATA['metadef_tags']:
            if (db_tag['name'] == values['name'] and
                    db_tag['namespace_id'] == namespace['id']):
                LOG.debug("Invalid update. It would result in a duplicate"
                          " metadata definition tag with the same"
                          " name=%(name)s"
                          " in namespace=%(namespace_name)s.",
                          {'name': tag['name'],
                           'namespace_name': namespace_name})
                raise exception.MetadefDuplicateTag(
                    name=tag['name'], namespace_name=namespace_name)

    DATA['metadef_tags'].remove(tag)
    tag.update(values)
    tag['updated_at'] = timeutils.utcnow()
    DATA['metadef_tags'].append(tag)
    return tag


@log_call
def metadef_tag_delete(context, namespace_name, name):
    """Delete a metadef tag"""
    global DATA
    tag = metadef_tag_get(context, namespace_name, name)
    DATA['metadef_tags'].remove(tag)
    return tag


def metadef_tag_delete_namespace_content(context, namespace_name):
    """Delete a tag or raise if namespace or tag doesn't exist."""
    return _metadef_delete_namespace_content(
        metadef_tag_get_all, 'metadef_tags', context, namespace_name)


@log_call
def metadef_tag_count(context, namespace_name):
    """Get metadef tag count in a namespace"""
    namespace = metadef_namespace_get(context, namespace_name)

    _check_namespace_visibility(context, namespace, namespace_name)

    count = 0
    for tag in DATA['metadef_tags']:
        if tag['namespace_id'] == namespace['id']:
            count = count + 1

    return count


def _format_association(namespace, resource_type, association_values):
    association = {
        'namespace_id': namespace['id'],
        'resource_type': resource_type['id'],
        'properties_target': None,
        'prefix': None,
        'created_at': timeutils.utcnow(),
        'updated_at': timeutils.utcnow()
    }
    association.update(association_values)
    return association


def _format_resource_type(values):
    dt = timeutils.utcnow()
    resource_type = {
        'id': _get_metadef_id(),
        'name': values['name'],
        'protected': True,
        'created_at': dt,
        'updated_at': dt
    }
    resource_type.update(values)
    return resource_type


def _format_property(values):
    property = {
        'id': _get_metadef_id(),
        'namespace_id': None,
        'name': None,
        'json_schema': None
    }
    property.update(values)
    return property


def _format_namespace(values):
    dt = timeutils.utcnow()
    namespace = {
        'id': _get_metadef_id(),
        'namespace': None,
        'display_name': None,
        'description': None,
        'visibility': 'private',
        'protected': False,
        'owner': None,
        'created_at': dt,
        'updated_at': dt
    }
    namespace.update(values)
    return namespace


def _format_object(values):
    dt = timeutils.utcnow()
    object = {
        'id': _get_metadef_id(),
        'namespace_id': None,
        'name': None,
        'description': None,
        'json_schema': None,
        'required': None,
        'created_at': dt,
        'updated_at': dt
    }
    object.update(values)
    return object


def _format_tag(values):
    dt = timeutils.utcnow()
    tag = {
        'id': _get_metadef_id(),
        'namespace_id': None,
        'name': None,
        'created_at': dt,
        'updated_at': dt
    }
    tag.update(values)
    return tag


def _is_namespace_visible(context, namespace):
    """Return true if namespace is visible in this context"""
    if context.is_admin:
        return True

    if namespace.get('visibility', '') == 'public':
        return True

    if namespace['owner'] is None:
        return True

    if context.owner is not None:
        if context.owner == namespace['owner']:
            return True

    return False
def _check_namespace_visibility(context, namespace, namespace_name):
    if not _is_namespace_visible(context, namespace):
        LOG.debug("Forbidding request, metadata definition namespace=%s "
                  "is not visible.", namespace_name)
        emsg = _("Forbidding request, metadata definition namespace=%s"
                 " is not visible.") % namespace_name
        raise exception.MetadefForbidden(emsg)


def _get_metadef_id():
    global INDEX
    INDEX += 1
    return INDEX


def _cached_image_format(cached_image):
    """Format a cached image for consumption outside of this module"""
    image_dict = {
        'id': cached_image['id'],
        'image_id': cached_image['image_id'],
        'last_accessed': cached_image['last_accessed'].timestamp(),
        'last_modified': cached_image['last_modified'].timestamp(),
        'size': cached_image['size'],
        'hits': cached_image['hits'],
        'checksum': cached_image['checksum']
    }

    return image_dict


@log_call
def node_reference_get_by_url(context, node_reference_url):
    global DATA
    db_data = DATA['node_reference']
    for node_reference in db_data:
        if (db_data[node_reference]['node_reference_url'] ==
                node_reference_url):
            return db_data[node_reference]
    else:
        raise exception.NotFound()


@log_call
def node_reference_create(context, node_reference_url, **values):
    global DATA
    node_reference_id = values.get('node_reference_id', 1)
    if node_reference_id in DATA['node_reference']:
        raise exception.Duplicate()

    node_reference = {
        'node_reference_id': node_reference_id,
        'node_reference_url': node_reference_url
    }

    DATA['node_reference'][node_reference_id] = node_reference
    return node_reference


@log_call
def get_hit_count(context, image_id, node_reference_url):
    global DATA
    if image_id not in DATA['cached_images']:
        return 0

    cached_image = _cached_image_format(DATA['cached_images'][image_id])
    return cached_image['hits']


@log_call
def get_cached_images(context, node_reference_url):
    global DATA
    node_reference = node_reference_get_by_url(context, node_reference_url)
    all_images = DATA['cached_images']
    cached_images = []
    for image_id in all_images:
        if all_images[image_id]['node_reference_id'] == \
                node_reference['node_reference_id']:
            cached_images.append(_cached_image_format(all_images[image_id]))

    return cached_images


@log_call
def delete_all_cached_images(context, node_reference_url):
    global DATA
    node_reference = node_reference_get_by_url(context, node_reference_url)
    all_images = tuple(DATA['cached_images'].keys())
    for image_id in all_images:
        if DATA['cached_images'][image_id]['node_reference_id'] == \
                node_reference['node_reference_id']:
            del DATA['cached_images'][image_id]


@log_call
def delete_cached_image(context, image_id, node_reference_url):
    global DATA
    node_reference = node_reference_get_by_url(context, node_reference_url)
    all_images = tuple(DATA['cached_images'].keys())
    for image in all_images:
        if DATA['cached_images'][image]['node_reference_id'] == \
                node_reference['node_reference_id'] and image_id == \
                DATA['cached_images'][image]['image_id']:
            del DATA['cached_images'][image]
            break


@log_call
def get_least_recently_accessed(context, node_reference_url):
    global DATA
    all_images = get_cached_images(context, node_reference_url)
    if all_images:
        return all_images[0]['image_id']
    return None


@log_call
def is_image_cached_for_node(context, node_reference_url, image_id):
    global DATA
    node_reference = node_reference_get_by_url(context, node_reference_url)
    all_images = DATA['cached_images']
    # Iterate with a distinct loop variable so the 'image_id' parameter is
    # not shadowed while looking for the requested image.
    for cached_id in all_images:
        if (all_images[cached_id]['node_reference_id'] ==
                node_reference['node_reference_id'] and
                cached_id == image_id):
            return True
    return False
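# NOTE(editor): a minimal usage sketch (not part of the driver API) showing
# how the node-reference cache helpers in this module fit together. The URL,
# image id, and node_reference_id below are hypothetical, and 'context' is
# passed as None because none of these helpers inspect it.
def _example_node_cache_usage():
    url = 'http://worker-1.example.org'
    node_reference_create(None, url, node_reference_id=42)
    insert_cache_details(None, url, 'image-1', size=1024)
    update_hit_count(None, 'image-1', url)
    # One cache entry exists and has been hit once, so this returns 1.
    return get_hit_count(None, 'image-1', url)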
@log_call
def insert_cache_details(context, node_reference_url, image_id,
                         size, checksum=None, last_accessed=None,
                         last_modified=None, hits=None):
    global DATA
    node_reference = node_reference_get_by_url(context, node_reference_url)
    accessed = last_accessed or timeutils.utcnow()
    modified = last_modified or timeutils.utcnow()

    values = {
        'last_accessed': accessed,
        'last_modified': modified,
        'node_reference_id': node_reference['node_reference_id'],
        'checksum': checksum,
        'image_id': image_id,
        'size': size,
        'hits': hits or 0,
        'id': str(uuid.uuid4()),
    }

    if image_id in DATA['cached_images']:
        raise exception.Duplicate()

    DATA['cached_images'][image_id] = values


@log_call
def update_hit_count(context, image_id, node_reference_url):
    global DATA
    last_hit_count = get_hit_count(context, image_id, node_reference_url)
    node_reference = node_reference_get_by_url(context, node_reference_url)
    all_images = DATA['cached_images']
    last_accessed = timeutils.utcnow()
    values = {
        'hits': last_hit_count + 1,
        'last_accessed': last_accessed
    }
    # Use a distinct loop variable so the 'image_id' parameter is not
    # shadowed while locating the entry to update.
    for cached_id in all_images:
        if (all_images[cached_id]['node_reference_id'] ==
                node_reference['node_reference_id'] and
                cached_id == image_id):
            all_images[cached_id].update(values)
            break
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8623035 glance-29.0.0/glance/db/sqlalchemy/0000775000175000017500000000000000000000000017036 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021135 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8623035 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/0000775000175000017500000000000000000000000022666 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/README0000664000175000017500000000004700000000000023547 0ustar00zuulzuul00000000000000
Generic single-database configuration.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/__init__.py0000664000175000017500000000632700000000000025007 0ustar00zuulzuul00000000000000
# Copyright 2016 Rackspace
# Copyright 2013 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
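# NOTE(editor): a small, hypothetical illustration of the ConfigParser
# interpolation hazard that get_alembic_config() below works around: a
# literal '%' in an RFC-1738-quoted database URL must be doubled before it
# is handed to ConfigParser.set(), which treats a bare '%' as an
# interpolation marker.
#
#   >>> url = 'mysql+pymysql://user:foo%40@db.example.org/glance'
#   >>> url.replace('%', '%%')
#   'mysql+pymysql://user:foo%%40@db.example.org/glance'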
import os

from alembic import config as alembic_config
from alembic import migration as alembic_migration
from alembic import script as alembic_script
from sqlalchemy import MetaData, Table

from glance.db.sqlalchemy import api as db_api


def get_alembic_config(engine=None):
    """Return a valid alembic config object"""
    ini_path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
    config = alembic_config.Config(os.path.abspath(ini_path))
    # we don't want to use the logger configuration from the file, which is
    # only really intended for the CLI
    # https://stackoverflow.com/a/42691781/613428
    config.attributes['configure_logger'] = False
    if engine is None:
        engine = db_api.get_engine()
    # str(sqlalchemy.engine.url.URL) returns a RFC-1738 quoted URL.
    # This means that a password like "foo@" will be turned into
    # "foo%40". This causes a problem for set_main_option() here
    # because that uses ConfigParser.set, which (by design) uses
    # *python* interpolation to write the string out ... where "%" is
    # the special python interpolation character! Avoid this
    # mismatch by quoting all %'s for the set below.
    quoted_engine_url = str(engine.url).replace('%', '%%')
    config.set_main_option('sqlalchemy.url', quoted_engine_url)
    return config


def get_current_alembic_heads():
    """Return current heads (if any) from the alembic migration table"""
    engine = db_api.get_engine()
    with engine.connect() as conn:
        context = alembic_migration.MigrationContext.configure(conn)
        heads = context.get_current_heads()

        def update_alembic_version(old, new):
            """Correct alembic head in order to upgrade DB using EMC method.

            :param old: Actual alembic head
            :param new: Expected alembic head to be updated
            """
            meta = MetaData()
            alembic_version = Table('alembic_version', meta,
                                    autoload_with=engine)
            # Run the UPDATE through the open connection; statement objects
            # no longer carry their own .execute() method in SQLAlchemy 2.x.
            conn.execute(
                alembic_version.update().values(version_num=new).where(
                    alembic_version.c.version_num == old))
            conn.commit()

        if "pike01" in heads:
            update_alembic_version("pike01", "pike_contract01")
        elif "ocata01" in heads:
            update_alembic_version("ocata01", "ocata_contract01")

        heads = context.get_current_heads()
        return heads


def get_alembic_branch_head(branch):
    """Return head revision name for particular branch"""
    a_config = get_alembic_config()
    script = alembic_script.ScriptDirectory.from_config(a_config)
    return script.revision_map.get_current_head(branch)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/add_artifacts_tables.py0000664000175000017500000002426000000000000027366 0ustar00zuulzuul00000000000000
# Copyright 2016 Rackspace
# Copyright 2013 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
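# NOTE(editor): hedged sketch of how table-creation helpers like the ones in
# this module are consumed -- an alembic revision script under
# alembic_migrations/versions/ imports the module and delegates its
# upgrade() to it, roughly:
#
#   from glance.db.sqlalchemy.alembic_migrations import add_artifacts_tables
#
#   def upgrade():
#       add_artifacts_tables.upgrade()
#
# The actual revision identifiers live in the versions/ scripts and are not
# shown here.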
from alembic import op from sqlalchemy.schema import ( Column, PrimaryKeyConstraint, ForeignKeyConstraint) from glance.db.sqlalchemy.schema import ( Boolean, DateTime, Integer, BigInteger, String, Text, Numeric) # noqa def _add_artifacts_table(): op.create_table('artifacts', Column('id', String(length=36), nullable=False), Column('name', String(length=255), nullable=False), Column('type_name', String(length=255), nullable=False), Column('type_version_prefix', BigInteger(), nullable=False), Column('type_version_suffix', String(length=255), nullable=True), Column('type_version_meta', String(length=255), nullable=True), Column('version_prefix', BigInteger(), nullable=False), Column('version_suffix', String(length=255), nullable=True), Column('version_meta', String(length=255), nullable=True), Column('description', Text(), nullable=True), Column('visibility', String(length=32), nullable=False), Column('state', String(length=32), nullable=False), Column('owner', String(length=255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('deleted_at', DateTime(), nullable=True), Column('published_at', DateTime(), nullable=True), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_artifact_name_and_version', 'artifacts', ['name', 'version_prefix', 'version_suffix'], unique=False) op.create_index('ix_artifact_owner', 'artifacts', ['owner'], unique=False) op.create_index('ix_artifact_state', 'artifacts', ['state'], unique=False) op.create_index('ix_artifact_type', 'artifacts', ['type_name', 'type_version_prefix', 'type_version_suffix'], unique=False) op.create_index('ix_artifact_visibility', 'artifacts', ['visibility'], unique=False) def _add_artifact_blobs_table(): op.create_table('artifact_blobs', Column('id', String(length=36), nullable=False), Column('artifact_id', String(length=36), nullable=False), Column('size', BigInteger(), nullable=False), Column('checksum', String(length=32), nullable=True), Column('name', String(length=255), nullable=False), Column('item_key', String(length=329), nullable=True), Column('position', Integer(), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_artifact_blobs_artifact_id', 'artifact_blobs', ['artifact_id'], unique=False) op.create_index('ix_artifact_blobs_name', 'artifact_blobs', ['name'], unique=False) def _add_artifact_dependencies_table(): op.create_table('artifact_dependencies', Column('id', String(length=36), nullable=False), Column('artifact_source', String(length=36), nullable=False), Column('artifact_dest', String(length=36), nullable=False), Column('artifact_origin', String(length=36), nullable=False), Column('is_direct', Boolean(), nullable=False), Column('position', Integer(), nullable=True), Column('name', String(length=36), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), ForeignKeyConstraint(['artifact_dest'], ['artifacts.id'], ), ForeignKeyConstraint(['artifact_origin'], ['artifacts.id'], ), ForeignKeyConstraint(['artifact_source'], ['artifacts.id'], ), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_artifact_dependencies_dest_id', 
'artifact_dependencies', ['artifact_dest'], unique=False) op.create_index('ix_artifact_dependencies_direct_dependencies', 'artifact_dependencies', ['artifact_source', 'is_direct'], unique=False) op.create_index('ix_artifact_dependencies_origin_id', 'artifact_dependencies', ['artifact_origin'], unique=False) op.create_index('ix_artifact_dependencies_source_id', 'artifact_dependencies', ['artifact_source'], unique=False) def _add_artifact_properties_table(): op.create_table('artifact_properties', Column('id', String(length=36), nullable=False), Column('artifact_id', String(length=36), nullable=False), Column('name', String(length=255), nullable=False), Column('string_value', String(length=255), nullable=True), Column('int_value', Integer(), nullable=True), Column('numeric_value', Numeric(), nullable=True), Column('bool_value', Boolean(), nullable=True), Column('text_value', Text(), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('position', Integer(), nullable=True), ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_artifact_properties_artifact_id', 'artifact_properties', ['artifact_id'], unique=False) op.create_index('ix_artifact_properties_name', 'artifact_properties', ['name'], unique=False) def _add_artifact_tags_table(): op.create_table('artifact_tags', Column('id', String(length=36), nullable=False), Column('artifact_id', String(length=36), nullable=False), Column('value', String(length=255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), ForeignKeyConstraint(['artifact_id'], ['artifacts.id'], ), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_artifact_tags_artifact_id', 'artifact_tags', ['artifact_id'], unique=False) op.create_index('ix_artifact_tags_artifact_id_tag_value', 'artifact_tags', ['artifact_id', 'value'], unique=False) def _add_artifact_blob_locations_table(): op.create_table('artifact_blob_locations', Column('id', String(length=36), nullable=False), Column('blob_id', String(length=36), nullable=False), Column('value', Text(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('position', Integer(), nullable=True), Column('status', String(length=36), nullable=True), ForeignKeyConstraint(['blob_id'], ['artifact_blobs.id'], ), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_artifact_blob_locations_blob_id', 'artifact_blob_locations', ['blob_id'], unique=False) def upgrade(): _add_artifacts_table() _add_artifact_blobs_table() _add_artifact_dependencies_table() _add_artifact_properties_table() _add_artifact_tags_table() _add_artifact_blob_locations_table() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/add_images_tables.py0000664000175000017500000002162400000000000026654 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import op from sqlalchemy import sql from sqlalchemy.schema import ( Column, PrimaryKeyConstraint, ForeignKeyConstraint, UniqueConstraint) from glance.db.sqlalchemy.schema import ( Boolean, DateTime, Integer, BigInteger, String, Text) # noqa from glance.db.sqlalchemy.models import JSONEncodedDict def _add_images_table(): op.create_table('images', Column('id', String(length=36), nullable=False), Column('name', String(length=255), nullable=True), Column('size', BigInteger().with_variant(Integer, "sqlite"), nullable=True), Column('status', String(length=30), nullable=False), Column('is_public', Boolean(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), Column('deleted_at', DateTime(), nullable=True), Column('deleted', Boolean(), nullable=False), Column('disk_format', String(length=20), nullable=True), Column('container_format', String(length=20), nullable=True), Column('checksum', String(length=32), nullable=True), Column('owner', String(length=255), nullable=True), Column('min_disk', Integer(), nullable=False), Column('min_ram', Integer(), nullable=False), Column('protected', Boolean(), server_default=sql.false(), nullable=False), Column('virtual_size', BigInteger().with_variant(Integer, "sqlite"), nullable=True), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('checksum_image_idx', 'images', ['checksum'], unique=False) op.create_index('ix_images_deleted', 'images', ['deleted'], unique=False) op.create_index('ix_images_is_public', 'images', ['is_public'], unique=False) op.create_index('owner_image_idx', 'images', ['owner'], unique=False) def _add_image_properties_table(): op.create_table('image_properties', Column('id', Integer(), nullable=False), Column('image_id', String(length=36), nullable=False), Column('name', String(length=255), nullable=False), Column('value', Text(), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), Column('deleted_at', DateTime(), nullable=True), Column('deleted', Boolean(), nullable=False), PrimaryKeyConstraint('id'), ForeignKeyConstraint(['image_id'], ['images.id'], ), UniqueConstraint('image_id', 'name', name='ix_image_properties_image_id_name'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_image_properties_deleted', 'image_properties', ['deleted'], unique=False) op.create_index('ix_image_properties_image_id', 'image_properties', ['image_id'], unique=False) def _add_image_locations_table(): op.create_table('image_locations', Column('id', Integer(), nullable=False), Column('image_id', String(length=36), nullable=False), Column('value', Text(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), Column('deleted_at', DateTime(), nullable=True), Column('deleted', Boolean(), nullable=False), Column('meta_data', JSONEncodedDict(), nullable=True), Column('status', String(length=30), server_default='active', nullable=False), PrimaryKeyConstraint('id'), 
ForeignKeyConstraint(['image_id'], ['images.id'], ), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_image_locations_deleted', 'image_locations', ['deleted'], unique=False) op.create_index('ix_image_locations_image_id', 'image_locations', ['image_id'], unique=False) def _add_image_members_table(): deleted_member_constraint = 'image_members_image_id_member_deleted_at_key' op.create_table('image_members', Column('id', Integer(), nullable=False), Column('image_id', String(length=36), nullable=False), Column('member', String(length=255), nullable=False), Column('can_share', Boolean(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), Column('deleted_at', DateTime(), nullable=True), Column('deleted', Boolean(), nullable=False), Column('status', String(length=20), server_default='pending', nullable=False), ForeignKeyConstraint(['image_id'], ['images.id'], ), PrimaryKeyConstraint('id'), UniqueConstraint('image_id', 'member', 'deleted_at', name=deleted_member_constraint), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_image_members_deleted', 'image_members', ['deleted'], unique=False) op.create_index('ix_image_members_image_id', 'image_members', ['image_id'], unique=False) op.create_index('ix_image_members_image_id_member', 'image_members', ['image_id', 'member'], unique=False) def _add_images_tags_table(): op.create_table('image_tags', Column('id', Integer(), nullable=False), Column('image_id', String(length=36), nullable=False), Column('value', String(length=255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), Column('deleted_at', DateTime(), nullable=True), Column('deleted', Boolean(), nullable=False), ForeignKeyConstraint(['image_id'], ['images.id'], ), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_image_tags_image_id', 'image_tags', ['image_id'], unique=False) op.create_index('ix_image_tags_image_id_tag_value', 'image_tags', ['image_id', 'value'], unique=False) def upgrade(): _add_images_table() _add_image_properties_table() _add_image_locations_table() _add_image_members_table() _add_images_tags_table() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/add_metadefs_tables.py0000664000175000017500000001730600000000000027201 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
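# NOTE(editor): editorial quick reference for the schema created below; the
# individual helpers remain the authoritative definitions. Each child table
# carries a namespace_id foreign key plus a (namespace_id, name) uniqueness
# constraint:
#
#   metadef_namespaces 1---* metadef_objects
#                      1---* metadef_properties
#                      1---* metadef_tags
#   metadef_namespaces *---* metadef_resource_types
#                      (via metadef_namespace_resource_types)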
from alembic import op from sqlalchemy.schema import ( Column, PrimaryKeyConstraint, ForeignKeyConstraint, UniqueConstraint) from glance.db.sqlalchemy.schema import ( Boolean, DateTime, Integer, String, Text) # noqa from glance.db.sqlalchemy.models import JSONEncodedDict def _add_metadef_namespaces_table(): op.create_table('metadef_namespaces', Column('id', Integer(), nullable=False), Column('namespace', String(length=80), nullable=False), Column('display_name', String(length=80), nullable=True), Column('description', Text(), nullable=True), Column('visibility', String(length=32), nullable=True), Column('protected', Boolean(), nullable=True), Column('owner', String(length=255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), PrimaryKeyConstraint('id'), UniqueConstraint('namespace', name='uq_metadef_namespaces_namespace'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_metadef_namespaces_owner', 'metadef_namespaces', ['owner'], unique=False) def _add_metadef_resource_types_table(): op.create_table('metadef_resource_types', Column('id', Integer(), nullable=False), Column('name', String(length=80), nullable=False), Column('protected', Boolean(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), PrimaryKeyConstraint('id'), UniqueConstraint('name', name='uq_metadef_resource_types_name'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) def _add_metadef_namespace_resource_types_table(): op.create_table('metadef_namespace_resource_types', Column('resource_type_id', Integer(), nullable=False), Column('namespace_id', Integer(), nullable=False), Column('properties_target', String(length=80), nullable=True), Column('prefix', String(length=80), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), ForeignKeyConstraint(['namespace_id'], ['metadef_namespaces.id'], ), ForeignKeyConstraint(['resource_type_id'], ['metadef_resource_types.id'], ), PrimaryKeyConstraint('resource_type_id', 'namespace_id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_metadef_ns_res_types_namespace_id', 'metadef_namespace_resource_types', ['namespace_id'], unique=False) def _add_metadef_objects_table(): ns_id_name_constraint = 'uq_metadef_objects_namespace_id_name' op.create_table('metadef_objects', Column('id', Integer(), nullable=False), Column('namespace_id', Integer(), nullable=False), Column('name', String(length=80), nullable=False), Column('description', Text(), nullable=True), Column('required', Text(), nullable=True), Column('json_schema', JSONEncodedDict(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), ForeignKeyConstraint(['namespace_id'], ['metadef_namespaces.id'], ), PrimaryKeyConstraint('id'), UniqueConstraint('namespace_id', 'name', name=ns_id_name_constraint), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_metadef_objects_name', 'metadef_objects', ['name'], unique=False) def _add_metadef_properties_table(): ns_id_name_constraint = 'uq_metadef_properties_namespace_id_name' op.create_table('metadef_properties', Column('id', Integer(), nullable=False), Column('namespace_id', Integer(), nullable=False), Column('name', String(length=80), nullable=False), Column('json_schema', JSONEncodedDict(), nullable=False), 
Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), ForeignKeyConstraint(['namespace_id'], ['metadef_namespaces.id'], ), PrimaryKeyConstraint('id'), UniqueConstraint('namespace_id', 'name', name=ns_id_name_constraint), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_metadef_properties_name', 'metadef_properties', ['name'], unique=False) def _add_metadef_tags_table(): op.create_table('metadef_tags', Column('id', Integer(), nullable=False), Column('namespace_id', Integer(), nullable=False), Column('name', String(length=80), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), ForeignKeyConstraint(['namespace_id'], ['metadef_namespaces.id'], ), PrimaryKeyConstraint('id'), UniqueConstraint('namespace_id', 'name', name='uq_metadef_tags_namespace_id_name'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_metadef_tags_name', 'metadef_tags', ['name'], unique=False) def upgrade(): _add_metadef_namespaces_table() _add_metadef_resource_types_table() _add_metadef_namespace_resource_types_table() _add_metadef_objects_table() _add_metadef_properties_table() _add_metadef_tags_table() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/add_tasks_tables.py0000664000175000017500000000543600000000000026537 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
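# NOTE(editor): editorial summary -- the two tables created below form a 1:1
# pair: 'tasks' carries the lifecycle columns (type, status, owner, expiry,
# soft-delete flags), while 'task_info' hangs the potentially large JSON
# 'input'/'result' payloads and 'message' text off the same record through a
# task_id primary key that is also a foreign key to tasks.id.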
from alembic import op from sqlalchemy.schema import ( Column, PrimaryKeyConstraint, ForeignKeyConstraint) from glance.db.sqlalchemy.schema import ( Boolean, DateTime, String, Text) # noqa from glance.db.sqlalchemy.models import JSONEncodedDict def _add_tasks_table(): op.create_table('tasks', Column('id', String(length=36), nullable=False), Column('type', String(length=30), nullable=False), Column('status', String(length=30), nullable=False), Column('owner', String(length=255), nullable=False), Column('expires_at', DateTime(), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=True), Column('deleted_at', DateTime(), nullable=True), Column('deleted', Boolean(), nullable=False), PrimaryKeyConstraint('id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) op.create_index('ix_tasks_deleted', 'tasks', ['deleted'], unique=False) op.create_index('ix_tasks_owner', 'tasks', ['owner'], unique=False) op.create_index('ix_tasks_status', 'tasks', ['status'], unique=False) op.create_index('ix_tasks_type', 'tasks', ['type'], unique=False) op.create_index('ix_tasks_updated_at', 'tasks', ['updated_at'], unique=False) def _add_task_info_table(): op.create_table('task_info', Column('task_id', String(length=36), nullable=False), Column('input', JSONEncodedDict(), nullable=True), Column('result', JSONEncodedDict(), nullable=True), Column('message', Text(), nullable=True), ForeignKeyConstraint(['task_id'], ['tasks.id'], ), PrimaryKeyConstraint('task_id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) def upgrade(): _add_tasks_table() _add_task_info_table() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/alembic.ini0000664000175000017500000000302500000000000024763 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; this defaults # to alembic_migrations/versions. 
When using multiple version # directories, initial revisions must be specified with --version-path # version_locations = %(here)s/bar %(here)s/bat alembic_migrations/versions # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 # Uncomment and update to your sql connection string if wishing to run # alembic directly from command line #sqlalchemy.url = # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.866304 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/0000775000175000017500000000000000000000000026033 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/2023_1_migrate01_empty.py0000664000175000017500000000147200000000000032306 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/2024_1_migrate01_empty.py0000664000175000017500000000147200000000000032307 0ustar00zuulzuul00000000000000# Copyright (C) 2023 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
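# NOTE(editor): editorial note -- releases that have no data to migrate
# still ship a stub like this one, because the discovery code in
# data_migrations/__init__.py matches modules by their release-name prefix
# and raises InvalidDataMigrationScript unless every match provides the
# has_migrations()/migrate() pair.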
def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/__init__.py0000664000175000017500000000443300000000000030150 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import os.path import pkgutil from glance.common import exception from glance.db import migration as db_migrations from glance.db.sqlalchemy import api as db_api def _find_migration_modules(release): migrations = list() for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(__file__)]): if module_name.startswith(release): migrations.append(module_name) migration_modules = list() for migration in sorted(migrations): module = importlib.import_module('.'.join([__package__, migration])) has_migrations_function = getattr(module, 'has_migrations', None) migrate_function = getattr(module, 'migrate', None) if has_migrations_function is None or migrate_function is None: raise exception.InvalidDataMigrationScript(script=module.__name__) migration_modules.append(module) return migration_modules def _run_migrations(engine, migrations): rows_migrated = 0 for migration in migrations: if migration.has_migrations(engine): rows_migrated += migration.migrate(engine) return rows_migrated def has_pending_migrations(engine=None, release=db_migrations.CURRENT_RELEASE): if not engine: engine = db_api.get_engine() migrations = _find_migration_modules(release) if not migrations: return False return any([x.has_migrations(engine) for x in migrations]) def migrate(engine=None, release=db_migrations.CURRENT_RELEASE): if not engine: engine = db_api.get_engine() migrations = _find_migration_modules(release) rows_migrated = _run_migrations(engine, migrations) return rows_migrated ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py 22 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_imag0000664000175000017500000000755500000000000034033 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
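# NOTE(editor): editorial summary of the visibility mapping this migration
# applies (see the helpers below for the authoritative queries):
#
#   is_public == True                   -> visibility = 'public'
#   is_public == False                  -> visibility = 'private'
#   'private' with non-deleted members  -> visibility = 'shared'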
from sqlalchemy import MetaData, select, Table, and_, not_


def has_migrations(engine):
    """Returns true if at least one data row can be migrated.

    There are rows left to migrate if:
    #1 There exists a row with visibility not set yet.
       Or
    #2 There exists a private image with active members but its visibility
       isn't set to 'shared' yet.

    Note: This method can return a false positive if data migrations
    are running in the background as it's being called.
    """
    meta = MetaData()
    images = Table('images', meta, autoload_with=engine)

    with engine.connect() as conn:
        rows_with_null_visibility = conn.execute(
            select(images.c.id)
            .where(images.c.visibility.is_(None))
            .limit(1)
        )

        if rows_with_null_visibility.rowcount == 1:
            return True

    image_members = Table('image_members', meta, autoload_with=engine)

    with engine.connect() as conn:
        rows_with_pending_shared = conn.execute(
            select(images.c.id).where(
                and_(
                    images.c.visibility == 'private',
                    images.c.id.in_(
                        select(
                            image_members.c.image_id
                        ).distinct().where(not_(image_members.c.deleted))
                    )
                )
            ).limit(1)
        )

        if rows_with_pending_shared.rowcount == 1:
            return True

    return False


def _mark_all_public_images_with_public_visibility(engine, images):
    with engine.connect() as conn:
        migrated_rows = conn.execute(
            images.update().values(
                visibility='public'
            ).where(images.c.is_public)
        )
        return migrated_rows.rowcount


def _mark_all_non_public_images_with_private_visibility(engine, images):
    with engine.connect() as conn:
        migrated_rows = conn.execute(
            images
            .update().values(visibility='private')
            .where(not_(images.c.is_public))
        )
        return migrated_rows.rowcount


def _mark_all_private_images_with_members_as_shared_visibility(
    engine, images, image_members,
):
    with engine.connect() as conn:
        migrated_rows = conn.execute(
            images.update().values(
                visibility='shared'
            )
            .where(
                and_(
                    images.c.visibility == 'private',
                    images.c.id.in_(
                        select(image_members.c.image_id).distinct().where(
                            not_(image_members.c.deleted)
                        )
                    )
                )
            )
        )
        return migrated_rows.rowcount


def _migrate_all(engine):
    meta = MetaData()
    images = Table('images', meta, autoload_with=engine)
    image_members = Table('image_members', meta, autoload_with=engine)

    num_rows = _mark_all_public_images_with_public_visibility(engine, images)
    num_rows += _mark_all_non_public_images_with_private_visibility(
        engine, images)
    num_rows += _mark_all_private_images_with_members_as_shared_visibility(
        engine, images, image_members)

    return num_rows


def migrate(engine):
    """Set visibility column based on is_public and image members."""
    return _migrate_all(engine)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py0000664000175000017500000000161100000000000032423 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(rosmaita): This file implements the migration interface, but doesn't
# migrate any data. The pike01 migration is contract-only.
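# NOTE(editor): for orientation, data_migrations/__init__.py drives stubs
# like this one roughly as follows (editorial paraphrase of
# _find_migration_modules()/_run_migrations(), not a new API):
#
#   for module in _find_migration_modules('pike'):
#       if module.has_migrations(engine):
#           rows_migrated += module.migrate(engine)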
def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/queens_migrate01_empty.py0000664000175000017500000000146700000000000033004 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/rocky_migrate01_empty.py0000664000175000017500000000147200000000000032627 0ustar00zuulzuul00000000000000# Copyright (C) 2018 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/rocky_migrate02_empty.py0000664000175000017500000000147700000000000032635 0ustar00zuulzuul00000000000000# Copyright (C) 2018 Verizon Wireless # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/train_migrate01_backend_to_store.py 22 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/train_migrate01_backend_to_sto0000664000175000017500000000435300000000000034007 0ustar00zuulzuul00000000000000# Copyright 2019 RedHat Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import sql def has_migrations(engine): """Returns true if at least one data row can be migrated. There are rows left to migrate if meta_data column has {"backend": "...."} Note: This method can return a false positive if data migrations are running in the background as it's being called. """ sql_query = sql.text( "select meta_data from image_locations where " "INSTR(meta_data, '\"backend\":') > 0" ) # NOTE(abhishekk): INSTR function doesn't supported in postgresql if engine.name == 'postgresql': sql_query = sql.text( "select meta_data from image_locations where " "POSITION('\"backend\":' IN meta_data) > 0" ) with engine.connect() as conn, conn.begin(): metadata_backend = conn.execute(sql_query) if metadata_backend.rowcount > 0: return True return False def migrate(engine): """Replace 'backend' with 'store' in meta_data column of image_locations""" sql_query = sql.text( "UPDATE image_locations SET meta_data = REPLACE(meta_data, " "'\"backend\":', '\"store\":') where INSTR(meta_data, " " '\"backend\":') > 0" ) # NOTE(abhishekk): INSTR function doesn't supported in postgresql if engine.name == 'postgresql': sql_query = sql.text( "UPDATE image_locations SET meta_data = REPLACE(" "meta_data, '\"backend\":', '\"store\":') where " "POSITION('\"backend\":' IN meta_data) > 0" ) with engine.connect() as conn, conn.begin(): migrated_rows = conn.execute(sql_query) return migrated_rows.rowcount ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/ussuri_migrate01_empty.py0000664000175000017500000000147200000000000033032 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/wallaby_migrate01_empty.py0000664000175000017500000000147200000000000033133 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/xena_migrate01_empty.py0000664000175000017500000000147200000000000032433 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/yoga_migrate01_empty.py0000664000175000017500000000147200000000000032437 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/data_migrations/zed_migrate01_empty.py0000664000175000017500000000147200000000000032262 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def has_migrations(engine): """Returns true if at least one data row can be migrated.""" return False def migrate(engine): """Return the number of rows migrated.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/env.py0000664000175000017500000000516500000000000024037 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as log_config from alembic import context from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from glance.db.sqlalchemy import models from glance.db.sqlalchemy import models_metadef # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config CONF = cfg.CONF # Interpret the config file for Python logging unless we're told not to. # This line sets up loggers basically. if config.attributes.get('configure_logger', True): log_config.fileConfig(config.config_file_name) # this is the MetaData object for the various models in the database target_metadata = models.BASE.metadata for table in models_metadef.BASE_DICT.metadata.sorted_tables: target_metadata._add_table(table.name, table.schema, table) def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = CONF.database.connection context.configure( url=url, render_as_batch=True, target_metadata=target_metadata, literal_binds=True, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. 
In this scenario we need to create an Engine and associate a connection with the context. """ engine = enginefacade.writer.get_engine() with engine.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, render_as_batch=True, ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/migrate.cfg0000664000175000017500000000174100000000000025002 0ustar00zuulzuul00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=Glance Migrations # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. version_table=alembic_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/script.py.mako0000664000175000017500000000065700000000000025502 0ustar00zuulzuul00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8703046 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/0000775000175000017500000000000000000000000024536 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_contract01_empty.py0000664000175000017500000000145600000000000031200 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. 
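# NOTE(editor): hypothetical sketch of driving the env.py hooks above
# through alembic's command API; the ini path is a placeholder, and
# glance normally reaches this code via 'glance-manage db sync' rather
# than by calling alembic directly.
from alembic import command
from alembic.config import Config


def upgrade_to_heads(alembic_ini='alembic.ini'):
    # context.is_offline_mode() in env.py selects between the online
    # and offline paths; passing sql=True would take the offline one.
    cfg = Config(alembic_ini)
    command.upgrade(cfg, 'heads', sql=False)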
revision = '2023_1_contract01' down_revision = 'zed_contract01' branch_labels = None depends_on = '2023_1_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/2023_1_expand01_empty.py0000664000175000017500000000163100000000000030635 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with 2023_1_contract01 Revision ID: 2023_1_expand01 Revises: zed_expand01 Create Date: 2020-01-03 11:55:16.657499 """ # revision identifiers, used by Alembic. revision = '2023_1_expand01' down_revision = 'zed_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/2024_1_contract01_empty.py0000664000175000017500000000146100000000000031175 0ustar00zuulzuul00000000000000# Copyright (C) 2023 RedHat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = '2024_1_contract01' down_revision = '2023_1_contract01' branch_labels = None depends_on = '2024_1_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/2024_1_expand01_add_cache_tables.py0000664000175000017500000000635200000000000032712 0ustar00zuulzuul00000000000000# Copyright (C) 2023 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""adds cache_node_reference and cached_images table(s) Revision ID: 2024_1_expand01 Revises: 2023_1_expand01 Create Date: 2023-10-31 11:55:16.657499 """ from alembic import op from sqlalchemy.schema import ( Column, PrimaryKeyConstraint, ForeignKeyConstraint, UniqueConstraint) from glance.db.sqlalchemy.schema import ( Integer, BigInteger, DateTime, String) # noqa # revision identifiers, used by Alembic. revision = '2024_1_expand01' down_revision = '2023_1_expand01' branch_labels = None depends_on = None def _add_node_reference_table(): op.create_table('node_reference', Column('node_reference_id', BigInteger().with_variant(Integer, 'sqlite'), nullable=False, autoincrement=True), Column('node_reference_url', String(length=255), nullable=False), PrimaryKeyConstraint('node_reference_id'), UniqueConstraint( 'node_reference_url', name='uq_node_reference_node_reference_url'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) def _add_cached_images_table(): op.create_table('cached_images', Column('id', BigInteger().with_variant(Integer, 'sqlite'), autoincrement=True, nullable=False), Column('image_id', String(length=36), nullable=False), Column('last_accessed', DateTime(), nullable=False), Column('last_modified', DateTime(), nullable=False), Column('size', BigInteger(), nullable=False), Column('hits', Integer(), nullable=False), Column('checksum', String(length=32), nullable=True), Column('node_reference_id', BigInteger().with_variant(Integer, 'sqlite'), nullable=False), PrimaryKeyConstraint('id'), ForeignKeyConstraint( ['node_reference_id'], ['node_reference.node_reference_id'], ), UniqueConstraint( 'image_id', 'node_reference_id', name='ix_cached_images_image_id_node_reference_id'), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) def upgrade(): _add_node_reference_table() _add_cached_images_table() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/__init__.py0000664000175000017500000000000000000000000026635 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/liberty_initial.py0000664000175000017500000000241100000000000030271 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """liberty initial Revision ID: liberty Revises: Create Date: 2016-08-03 16:06:59.657433 """ from glance.db.sqlalchemy.alembic_migrations import add_artifacts_tables from glance.db.sqlalchemy.alembic_migrations import add_images_tables from glance.db.sqlalchemy.alembic_migrations import add_metadefs_tables from glance.db.sqlalchemy.alembic_migrations import add_tasks_tables # revision identifiers, used by Alembic. 
revision = 'liberty' down_revision = None branch_labels = None depends_on = None def upgrade(): add_images_tables.upgrade() add_tasks_tables.upgrade() add_metadefs_tables.upgrade() add_artifacts_tables.upgrade() ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_idx.py 22 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_id0000664000175000017500000000230700000000000033715 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add index on created_at and updated_at columns of 'images' table Revision ID: mitaka01 Revises: liberty Create Date: 2016-08-03 17:19:35.306161 """ from alembic import op # revision identifiers, used by Alembic. revision = 'mitaka01' down_revision = 'liberty' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('images') as batch_op: batch_op.create_index( 'created_at_image_idx', ['created_at'], ) batch_op.create_index( 'updated_at_image_idx', ['updated_at'], ) ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_server.py 22 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_serve0000664000175000017500000000240300000000000034047 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update metadef os_nova_server Revision ID: mitaka02 Revises: mitaka01 Create Date: 2016-08-03 17:23:23.041663 """ from alembic import op from sqlalchemy import MetaData, Table # revision identifiers, used by Alembic. 
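# NOTE(editor): for reference, the batch_alter_table() calls in
# mitaka01 above amount to the following plain SQL on MySQL and
# PostgreSQL (only SQLite gets the copy-and-move "batch" treatment):
#
#   CREATE INDEX created_at_image_idx ON images (created_at);
#   CREATE INDEX updated_at_image_idx ON images (updated_at);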
revision = 'mitaka02' down_revision = 'mitaka01' branch_labels = None depends_on = None def upgrade(): bind = op.get_bind() meta = MetaData() resource_types_table = Table( 'metadef_resource_types', meta, autoload_with=bind) op.execute( resource_types_table.update().where( resource_types_table.c.name == 'OS::Nova::Instance' ).values(name='OS::Nova::Server') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/ocata_contract01_drop_is_public.py0000664000175000017500000000425100000000000033314 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """remove is_public from images Revision ID: ocata_contract01 Revises: mitaka02 Create Date: 2017-01-27 12:58:16.647499 """ from alembic import op from sqlalchemy import Enum from glance.cmd import manage from glance.db import migration # revision identifiers, used by Alembic. revision = 'ocata_contract01' down_revision = 'mitaka02' branch_labels = ('ocata01', migration.CONTRACT_BRANCH) depends_on = 'ocata_expand01' MYSQL_DROP_INSERT_TRIGGER = """ DROP TRIGGER insert_visibility; """ MYSQL_DROP_UPDATE_TRIGGER = """ DROP TRIGGER update_visibility; """ def _drop_column(): with op.batch_alter_table('images') as batch_op: batch_op.drop_index('ix_images_is_public') batch_op.drop_column('is_public') def _drop_triggers(connection): engine_name = connection.engine.name if engine_name == "mysql": op.execute(MYSQL_DROP_INSERT_TRIGGER) op.execute(MYSQL_DROP_UPDATE_TRIGGER) def _set_nullability_and_default_on_visibility(): # NOTE(hemanthm): setting the default on 'visibility' column # to 'shared'. Also, marking it as non-nullable. existing_type = Enum('private', 'public', 'shared', 'community', name='image_visibility') with op.batch_alter_table('images') as batch_op: batch_op.alter_column('visibility', nullable=False, server_default='shared', existing_type=existing_type) def upgrade(): bind = op.get_bind() _drop_column() if manage.USE_TRIGGERS: _drop_triggers(bind) _set_nullability_and_default_on_visibility() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py0000664000175000017500000001334100000000000032760 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
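# NOTE(editor): the reflect-then-update pattern mitaka02 uses above,
# shown standalone; 'widgets' and its 'name' column are hypothetical
# placeholders, not part of glance's schema.
from sqlalchemy import MetaData, Table


def rename_value(engine, old, new):
    meta = MetaData()
    with engine.begin() as conn:
        # reflect the live table instead of redeclaring its model
        widgets = Table('widgets', meta, autoload_with=conn)
        conn.execute(
            widgets.update().where(
                widgets.c.name == old
            ).values(name=new))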
"""add visibility to images Revision ID: ocata_expand01 Revises: mitaka02 Create Date: 2017-01-27 12:58:16.647499 """ from alembic import op from sqlalchemy import Column, Enum from glance.cmd import manage from glance.db import migration from glance.db.sqlalchemy.schema import Boolean # revision identifiers, used by Alembic. revision = 'ocata_expand01' down_revision = 'mitaka02' branch_labels = migration.EXPAND_BRANCH depends_on = None ERROR_MESSAGE = 'Invalid visibility value' MYSQL_INSERT_TRIGGER = """ CREATE TRIGGER insert_visibility BEFORE INSERT ON images FOR EACH ROW BEGIN -- NOTE(abashmak): -- The following IF/ELSE block implements a priority decision tree. -- Strict order MUST be followed to correctly cover all the edge cases. -- Edge case: neither is_public nor visibility specified -- (or both specified as NULL): IF NEW.is_public <=> NULL AND NEW.visibility <=> NULL THEN SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; -- Edge case: both is_public and visibility specified: ELSEIF NOT(NEW.is_public <=> NULL OR NEW.visibility <=> NULL) THEN SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; -- Inserting with is_public, set visibility accordingly: ELSEIF NOT NEW.is_public <=> NULL THEN IF NEW.is_public = 1 THEN SET NEW.visibility = 'public'; ELSE SET NEW.visibility = 'shared'; END IF; -- Inserting with visibility, set is_public accordingly: ELSEIF NOT NEW.visibility <=> NULL THEN IF NEW.visibility = 'public' THEN SET NEW.is_public = 1; ELSE SET NEW.is_public = 0; END IF; -- Edge case: either one of: is_public or visibility, -- is explicitly set to NULL: ELSE SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; END IF; END; """ MYSQL_UPDATE_TRIGGER = """ CREATE TRIGGER update_visibility BEFORE UPDATE ON images FOR EACH ROW BEGIN -- Case: new value specified for is_public: IF NOT NEW.is_public <=> OLD.is_public THEN -- Edge case: is_public explicitly set to NULL: IF NEW.is_public <=> NULL THEN SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; -- Edge case: new value also specified for visibility ELSEIF NOT NEW.visibility <=> OLD.visibility THEN SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; -- Case: visibility not specified or specified as OLD value: -- NOTE(abashmak): There is no way to reliably determine which -- of the above two cases occurred, but allowing to proceed with -- the update in either case does not break the model for both -- N and N-1 services. ELSE -- Set visibility according to the value of is_public: IF NEW.is_public <=> 1 THEN SET NEW.visibility = 'public'; ELSE SET NEW.visibility = 'shared'; END IF; END IF; -- Case: new value specified for visibility: ELSEIF NOT NEW.visibility <=> OLD.visibility THEN -- Edge case: visibility explicitly set to NULL: IF NEW.visibility <=> NULL THEN SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; -- Edge case: new value also specified for is_public ELSEIF NOT NEW.is_public <=> OLD.is_public THEN SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s'; -- Case: is_public not specified or specified as OLD value: -- NOTE(abashmak): There is no way to reliably determine which -- of the above two cases occurred, but allowing to proceed with -- the update in either case does not break the model for both -- N and N-1 services. 
ELSE -- Set is_public according to the value of visibility: IF NEW.visibility <=> 'public' THEN SET NEW.is_public = 1; ELSE SET NEW.is_public = 0; END IF; END IF; END IF; END; """ def _add_visibility_column(bind): enum = Enum('private', 'public', 'shared', 'community', name='image_visibility') enum.create(bind=bind) v_col = Column('visibility', enum, nullable=True, server_default=None) op.add_column('images', v_col) op.create_index('visibility_image_idx', 'images', ['visibility']) def _add_triggers(connection): if connection.engine.name == 'mysql': op.execute(MYSQL_INSERT_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE, ERROR_MESSAGE)) op.execute(MYSQL_UPDATE_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE, ERROR_MESSAGE, ERROR_MESSAGE)) def _change_nullability_and_default_on_is_public(): # NOTE(hemanthm): we mark is_public as nullable so that when new versions # add data only to be visibility column, is_public can be null. with op.batch_alter_table('images') as batch_op: batch_op.alter_column( 'is_public', nullable=True, server_default=None, existing_type=Boolean()) def upgrade(): bind = op.get_bind() _add_visibility_column(bind) _change_nullability_and_default_on_is_public() if manage.USE_TRIGGERS: _add_triggers(bind) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py 22 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables0000664000175000017500000000247100000000000034071 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """drop glare artifacts tables Revision ID: pike_contract01 Revises: ocata_contract01 Create Date: 2017-02-09 20:32:51.222867 """ from alembic import op # revision identifiers, used by Alembic. revision = 'pike_contract01' down_revision = 'ocata_contract01' branch_labels = None depends_on = 'pike_expand01' def upgrade(): # create list of artifact tables in reverse order of their creation table_names = [] table_names.append('artifact_blob_locations') table_names.append('artifact_properties') table_names.append('artifact_blobs') table_names.append('artifact_dependencies') table_names.append('artifact_tags') table_names.append('artifacts') for table_name in table_names: op.drop_table(table_name=table_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py0000664000175000017500000000156700000000000030767 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
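# NOTE(editor): a pure-Python restatement of the mapping the two MySQL
# triggers above keep consistent while ocata-era and older services
# share one database; these helpers are illustrative, not part of the
# migration.
def visibility_from_is_public(is_public):
    # is_public=1 maps to 'public'; everything else collapses to
    # 'shared', the server default that ocata introduces.
    return 'public' if is_public else 'shared'


def is_public_from_visibility(visibility):
    # only 'public' maps back to is_public=1; 'private', 'shared' and
    # 'community' are all is_public=0.
    return 1 if visibility == 'public' else 0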
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with pike_contract01 Revision ID: pike_expand01 Revises: ocata_expand01 Create Date: 2017-02-09 19:55:16.657499 """ # revision identifiers, used by Alembic. revision = 'pike_expand01' down_revision = 'ocata_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/queens_contract01_empty.py0000664000175000017500000000145500000000000031671 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'queens_contract01' down_revision = 'pike_contract01' branch_labels = None depends_on = 'queens_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/queens_expand01_empty.py0000664000175000017500000000143400000000000031330 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'queens_expand01' down_revision = 'pike_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/rocky_contract01_empty.py0000664000175000017500000000146000000000000031514 0ustar00zuulzuul00000000000000# Copyright (C) 2018 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'rocky_contract01' down_revision = 'queens_contract01' branch_labels = None depends_on = 'rocky_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/rocky_contract02_empty.py0000664000175000017500000000146400000000000031521 0ustar00zuulzuul00000000000000# Copyright (C) 2018 Verizon Wireless # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'rocky_contract02' down_revision = 'rocky_contract01' branch_labels = None depends_on = 'rocky_expand02' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/rocky_expand01_add_os_hidden.py0000664000175000017500000000215700000000000032570 0ustar00zuulzuul00000000000000# Copyright (C) 2018 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add os_hidden column to images table""" from alembic import op from sqlalchemy import Boolean, Column, sql # revision identifiers, used by Alembic. revision = 'rocky_expand01' down_revision = 'queens_expand01' branch_labels = None depends_on = None def upgrade(): h_col = Column('os_hidden', Boolean, default=False, nullable=False, server_default=sql.expression.false()) op.add_column('images', h_col) op.create_index('os_hidden_image_idx', 'images', ['os_hidden']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/rocky_expand02_add_os_hash_.py0000664000175000017500000000230600000000000032414 0ustar00zuulzuul00000000000000# Copyright (C) 2018 Verizon Wireless # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add os_hash_algo and os_hash_value columns to images table""" from alembic import op from sqlalchemy import Column, String # revision identifiers, used by Alembic. revision = 'rocky_expand02' down_revision = 'rocky_expand01' branch_labels = None depends_on = None def upgrade(): algo_col = Column('os_hash_algo', String(length=64), nullable=True) value_col = Column('os_hash_value', String(length=128), nullable=True) op.add_column('images', algo_col) op.add_column('images', value_col) op.create_index('os_hash_value_image_idx', 'images', ['os_hash_value']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/train_contract01_empty.py0000664000175000017500000000145600000000000031507 0ustar00zuulzuul00000000000000# Copyright (C) 2019 RedHat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'train_contract01' down_revision = 'rocky_contract02' branch_labels = None depends_on = 'train_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/train_expand01_empty.py0000664000175000017500000000163200000000000031145 0ustar00zuulzuul00000000000000# Copyright (C) 2019 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with train_contract01 Revision ID: train_expand01 Revises: rocky_expand02 Create Date: 2019-06-17 11:55:16.657499 """ # revision identifiers, used by Alembic. revision = 'train_expand01' down_revision = 'rocky_expand02' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/ussuri_contract01_empty.py0000664000175000017500000000146000000000000031717 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # All Rights Reserved. 
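# NOTE(editor): illustrative only -- the os_hash_algo/os_hash_value
# columns added above carry glance's "multihash"; sha512, whose 128
# hex digits match the String(128) column, is the default algorithm.
import hashlib


def compute_multihash(chunks, algo='sha512'):
    h = hashlib.new(algo)
    for chunk in chunks:
        h.update(chunk)
    return h.hexdigest()  # 128 hex characters for sha512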
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'ussuri_contract01' down_revision = 'train_contract01' branch_labels = None depends_on = 'ussuri_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/ussuri_expand01_empty.py0000664000175000017500000000163300000000000031363 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with ussuri_contract01 Revision ID: ussuri_expand01 Revises: train_expand01 Create Date: 2020-01-03 11:55:16.657499 """ # revision identifiers, used by Alembic. revision = 'ussuri_expand01' down_revision = 'train_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/wallaby_contract01_empty.py0000664000175000017500000000146300000000000032023 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'wallaby_contract01' down_revision = 'ussuri_contract01' branch_labels = None depends_on = 'wallaby_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/wallaby_expand01_add_user_imageid_requestid_to_tasks.py 22 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/wallaby_expand01_add_user_imageid_req0000664000175000017500000000255400000000000034016 0ustar00zuulzuul00000000000000# Copyright (C) 2021 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add image_id, request_id, user_id columns to tasks table Revision ID: wallaby_expand01 Revises: ussuri_expand01 Create Date: 2021-02-04 11:55:16.657499 """ from alembic import op from sqlalchemy import String, Column # revision identifiers, used by Alembic. revision = 'wallaby_expand01' down_revision = 'ussuri_expand01' branch_labels = None depends_on = None def upgrade(): image_id_col = Column('image_id', String(length=36), nullable=True) request_id_col = Column('request_id', String(length=64), nullable=True) user_col = Column('user_id', String(length=64), nullable=True) op.add_column('tasks', image_id_col) op.add_column('tasks', request_id_col) op.add_column('tasks', user_col) op.create_index('ix_tasks_image_id', 'tasks', ['image_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/xena_contract01_empty.py0000664000175000017500000000145600000000000031325 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'xena_contract01' down_revision = 'wallaby_contract01' branch_labels = None depends_on = 'xena_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/xena_expand01_empty.py0000664000175000017500000000163300000000000030764 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with xena_contract01 Revision ID: xena_expand01 Revises: wallaby_expand01 Create Date: 2020-01-03 11:55:16.657499 """ # revision identifiers, used by Alembic.
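# NOTE(editor): hypothetical sketch of the lookup the ix_tasks_image_id
# index added by wallaby_expand01 above serves; column names follow the
# migration, the query itself is only a sketch.
from sqlalchemy import text


def tasks_for_image(engine, image_id):
    with engine.connect() as conn:
        rows = conn.execute(
            text("SELECT id, user_id, request_id FROM tasks "
                 "WHERE image_id = :iid"), {'iid': image_id})
        return rows.fetchall()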
revision = 'xena_expand01' down_revision = 'wallaby_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/yoga_contract01_empty.py0000664000175000017500000000145300000000000031326 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic. revision = 'yoga_contract01' down_revision = 'xena_contract01' branch_labels = None depends_on = 'yoga_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/yoga_expand01_empty.py0000664000175000017500000000163000000000000030765 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with yoga_contract01 Revision ID: yoga_expand01 Revises: xena_expand01 Create Date: 2020-01-03 11:55:16.657499 """ # revision identifiers, used by Alembic. revision = 'yoga_expand01' down_revision = 'xena_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/zed_contract01_empty.py0000664000175000017500000000145100000000000031147 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # revision identifiers, used by Alembic.
revision = 'zed_contract01' down_revision = 'yoga_contract01' branch_labels = None depends_on = 'zed_expand01' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/alembic_migrations/versions/zed_expand01_empty.py0000664000175000017500000000162700000000000030616 0ustar00zuulzuul00000000000000# Copyright (C) 2020 RedHat Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """empty expand for symmetry with zed_contract01 Revision ID: zed_expand01 Revises: yoga_expand01 Create Date: 2020-01-03 11:55:16.657499 """ # revision identifiers, used by Alembic. revision = 'zed_expand01' down_revision = 'yoga_expand01' branch_labels = None depends_on = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/api.py0000664000175000017500000026325300000000000020174 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # Copyright 2013 IBM Corp. # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""Defines interface for DB access.""" import datetime import itertools import threading from oslo_config import cfg from oslo_db import exception as db_exception from oslo_db.sqlalchemy import session as oslo_db_session from oslo_log import log as logging from oslo_utils import excutils import osprofiler.sqlalchemy from retrying import retry import sqlalchemy from sqlalchemy.ext.compiler import compiles from sqlalchemy import MetaData, Table import sqlalchemy.orm as sa_orm from sqlalchemy import sql import sqlalchemy.sql as sa_sql from glance.common import exception from glance.common import timeutils from glance.common import utils from glance.db.sqlalchemy.metadef_api import (resource_type as metadef_resource_type_api) from glance.db.sqlalchemy.metadef_api import (resource_type_association as metadef_association_api) from glance.db.sqlalchemy.metadef_api import namespace as metadef_namespace_api from glance.db.sqlalchemy.metadef_api import object as metadef_object_api from glance.db.sqlalchemy.metadef_api import property as metadef_property_api from glance.db.sqlalchemy.metadef_api import tag as metadef_tag_api from glance.db.sqlalchemy import models from glance.db import utils as db_utils from glance.i18n import _, _LW, _LI, _LE sa_logger = None LOG = logging.getLogger(__name__) STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete', 'deleted', 'deactivated', 'importing', 'uploading'] CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") _FACADE = None _LOCK = threading.Lock() def _retry_on_deadlock(exc): """Decorator to retry a DB API call if Deadlock was received.""" if isinstance(exc, db_exception.DBDeadlock): LOG.warning(_LW("Deadlock detected. Retrying...")) return True return False def _create_facade_lazily(): global _LOCK, _FACADE if _FACADE is None: with _LOCK: if _FACADE is None: _FACADE = oslo_db_session.EngineFacade.from_config( CONF, sqlite_fk=True, ) if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: osprofiler.sqlalchemy.add_tracing(sqlalchemy, _FACADE.get_engine(), "db") return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(expire_on_commit=False): facade = _create_facade_lazily() return facade.get_session(autocommit=False, expire_on_commit=expire_on_commit) def _validate_db_int(**kwargs): """Make sure that all arguments are less than or equal to 2 ** 31 - 1. This limitation is introduced because databases stores INT in 4 bytes. If the validation fails for some argument, exception.Invalid is raised with appropriate information. """ max_int = (2 ** 31) - 1 for param_key, param_value in kwargs.items(): if param_value and param_value > max_int: msg = _("'%(param)s' value out of range, " "must not exceed %(max)d.") % {"param": param_key, "max": max_int} raise exception.Invalid(msg) def clear_db_env(): """ Unset global configuration variables for database. 
""" global _FACADE _FACADE = None def _check_mutate_authorization(context, image_ref): if not is_image_mutable(context, image_ref): LOG.warning(_LW("Attempted to modify image user did not own.")) msg = _("You do not own this image") if image_ref.visibility in ['private', 'shared']: exc_class = exception.Forbidden else: # 'public', or 'community' exc_class = exception.ForbiddenPublicImage raise exc_class(msg) @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def image_create(context, values, v1_mode=False): """Create an image from the values dictionary.""" session = get_session() with session.begin(): image_ref = _image_update( context, session, None, values, purge_props=False) session.expire_all() with session.begin(): image = _image_get(context, session, image_ref.id) image = _normalize_locations(context, image.to_dict()) if v1_mode: image = db_utils.mutate_image_dict_to_v1(image) return image @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def image_update(context, image_id, values, purge_props=False, from_state=None, v1_mode=False, atomic_props=None): """ Set the given properties on an image and update it. :raises: ImageNotFound if image does not exist. """ session = get_session() with session.begin(): image_ref = _image_update( context, session, image_id, values, purge_props, from_state=from_state, atomic_props=atomic_props) session.expire_all() with session.begin(): image = _image_get(context, session, image_ref.id) image = _normalize_locations(context, image.to_dict()) if v1_mode: image = db_utils.mutate_image_dict_to_v1(image) return image def image_restore(context, image_id): """Restore the pending-delete image to active.""" session = get_session() with session.begin(): image_ref = _image_get(context, session, image_id) if image_ref.status != 'pending_delete': msg = (_('cannot restore the image from %s to active (wanted ' 'from_state=pending_delete)') % image_ref.status) raise exception.Conflict(msg) query = session.query(models.Image).filter_by(id=image_id) values = {'status': 'active', 'deleted': 0} query.update(values, synchronize_session='fetch') @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def image_destroy(context, image_id): """Destroy the image or raise if it does not exist.""" session = get_session() with session.begin(): image_ref = _image_get(context, session, image_id) # Perform authorization check _check_mutate_authorization(context, image_ref) image_ref.delete(session=session) delete_time = image_ref.deleted_at _image_locations_delete_all(context, session, image_id, delete_time) _image_property_delete_all(context, session, image_id, delete_time) _image_member_delete_all(context, session, image_id, delete_time) _image_tag_delete_all(context, session, image_id, delete_time) return _normalize_locations(context, image_ref.to_dict()) def _normalize_locations(context, image, force_show_deleted=False): """ Generate suitable dictionary list for locations field of image. We don't need to set other data fields of location record which return from image query. 
""" if image['status'] == 'deactivated' and not context.is_admin: # Locations are not returned for a deactivated image for non-admin user image['locations'] = [] return image if force_show_deleted: locations = image['locations'] else: locations = [x for x in image['locations'] if not x.deleted] image['locations'] = [{'id': loc['id'], 'url': loc['value'], 'metadata': loc['meta_data'], 'status': loc['status']} for loc in locations] return image def _normalize_tags(image): undeleted_tags = [x for x in image['tags'] if not x.deleted] image['tags'] = [tag['value'] for tag in undeleted_tags] return image def image_get(context, image_id, force_show_deleted=False, v1_mode=False): session = get_session() with session.begin(): image = _image_get(context, session, image_id, force_show_deleted=force_show_deleted) image = _normalize_locations(context, image.to_dict(), force_show_deleted=force_show_deleted) if v1_mode: image = db_utils.mutate_image_dict_to_v1(image) return image def _check_image_id(image_id): """ check if the given image id is valid before executing operations. For now, we only check its length. The original purpose of this method is wrapping the different behaviors between MySql and DB2 when the image id length is longer than the defined length in database model. :param image_id: The id of the image we want to check :returns: Raise NoFound exception if given image id is invalid """ if (image_id and len(image_id) > models.Image.id.property.columns[0].type.length): raise exception.ImageNotFound() def _image_get(context, session, image_id, force_show_deleted=False): """Get an image or raise if it does not exist.""" _check_image_id(image_id) try: query = session.query(models.Image).options( sa_orm.joinedload(models.Image.properties)).options( sa_orm.joinedload( models.Image.locations)).filter_by(id=image_id) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) image = query.one() except sa_orm.exc.NoResultFound: msg = "No image found with ID %s" % image_id LOG.debug(msg) raise exception.ImageNotFound(msg) # Make sure they can look at it if not is_image_visible(context, image): msg = "Forbidding request, image %s not visible" % image_id LOG.debug(msg) raise exception.Forbidden(msg) return image def is_image_mutable(context, image): """Return True if the image is mutable in this context.""" # Is admin == image mutable if context.is_admin: return True # No owner == image not mutable if image['owner'] is None or context.owner is None: return False # Image only mutable by its owner return image['owner'] == context.owner def is_image_visible(context, image, status=None): """Return True if the image is visible in this context.""" return db_utils.is_image_visible(context, image, image_member_find, status) def _get_default_column_value(column_type): """Return the default value of the columns from DB table In postgreDB case, if no right default values are being set, an psycopg2.DataError will be thrown. """ type_schema = { 'datetime': None, 'big_integer': 0, 'integer': 0, 'string': '' } if isinstance(column_type, sa_sql.type_api.Variant): return _get_default_column_value(column_type.impl) return type_schema[column_type.__visit_name__] def _paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. 
(If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-valued sort_key (k1, k2, k3), we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, and the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we return the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :rtype: sqlalchemy.orm.query.Query :returns: The query with sorting/pagination added. """ if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming it is 'id' LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) assert (not (sort_dir and sort_dirs)) # nosec # nosec: This function runs safely if the assertion fails. # Default the sort direction to ascending if sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir] * len(sort_keys) assert (len(sort_dirs) == len(sort_keys)) # nosec # nosec: This function runs safely if the assertion fails.
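# For example: with sort_keys=('name', 'id'), both ascending, and marker values (X1, X2), the criteria assembled below are equivalent to the docstring's formula, roughly: sa_sql.or_(model.name > X1, sa_sql.and_(model.name == X1, model.id > X2)) # i.e. in SQL terms: (name > X1) OR (name = X1 AND id > X2)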
if len(sort_dirs) < len(sort_keys): sort_dirs += [sort_dir] * (len(sort_keys) - len(sort_dirs)) # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exception.InvalidSortKey() query = query.order_by(sort_dir_func(sort_key_attr)) default = '' # Default to an empty string if NULL # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) if v is None: v = default marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(len(sort_keys)): crit_attrs = [] for j in range(i): model_attr = getattr(model, sort_keys[j]) default = _get_default_column_value( model_attr.property.columns[0].type) attr = sa_sql.expression.case( (model_attr != None, model_attr), else_=default) crit_attrs.append((attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) default = _get_default_column_value( model_attr.property.columns[0].type) attr = sa_sql.expression.case( (model_attr != None, model_attr), else_=default) if sort_dirs[i] == 'desc': crit_attrs.append((attr < marker_values[i])) elif sort_dirs[i] == 'asc': crit_attrs.append((attr > marker_values[i])) else: raise ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) criteria = sa_sql.and_(*crit_attrs) criteria_list.append(criteria) f = sa_sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) return query def _make_conditions_from_filters(filters, is_public=None): # NOTE(venkatesh): make a copy of the filters as they are altered in this # method. filters = filters.copy() image_conditions = [] prop_conditions = [] tag_conditions = [] if is_public is not None: if is_public: image_conditions.append(models.Image.visibility == 'public') else: image_conditions.append(models.Image.visibility != 'public') if 'os_hidden' in filters: os_hidden = filters.pop('os_hidden') image_conditions.append(models.Image.os_hidden == os_hidden) if 'checksum' in filters: checksum = filters.pop('checksum') image_conditions.append(models.Image.checksum == checksum) if 'os_hash_value' in filters: os_hash_value = filters.pop('os_hash_value') image_conditions.append(models.Image.os_hash_value == os_hash_value) for (k, v) in filters.pop('properties', {}).items(): prop_filters = _make_image_property_condition(key=k, value=v) prop_conditions.append(prop_filters) if 'changes-since' in filters: # normalize timestamp to UTC, as sqlalchemy doesn't appear to # respect timezone offsets changes_since = timeutils.normalize_time(filters.pop('changes-since')) image_conditions.append(models.Image.updated_at > changes_since) if 'deleted' in filters: deleted_filter = filters.pop('deleted') image_conditions.append(models.Image.deleted == deleted_filter) # TODO(bcwaldon): handle this logic in registry server if not deleted_filter: image_statuses = [s for s in STATUSES if s != 'killed'] image_conditions.append(models.Image.status.in_(image_statuses)) if 'tags' in filters: tags = filters.pop('tags') for tag in tags: alias = sa_orm.aliased(models.ImageTag) tag_filters = [alias.deleted == False] tag_filters.extend([alias.value == tag]) tag_conditions.append((alias, tag_filters)) filters = {k: v for k, v in filters.items() if v is not None} # need to copy items because filters is modified in the loop body # (filters.pop(k)) keys =
list(filters.keys()) for k in keys: key = k if k.endswith('_min') or k.endswith('_max'): key = key[0:-4] try: v = int(filters.pop(k)) except ValueError: msg = _("Unable to filter on a range " "with a non-numeric value.") raise exception.InvalidFilterRangeValue(msg) if k.endswith('_min'): image_conditions.append(getattr(models.Image, key) >= v) if k.endswith('_max'): image_conditions.append(getattr(models.Image, key) <= v) elif k in ['created_at', 'updated_at']: attr_value = getattr(models.Image, key) operator, isotime = utils.split_filter_op(filters.pop(k)) try: parsed_time = timeutils.parse_isotime(isotime) threshold = timeutils.normalize_time(parsed_time) except ValueError: msg = (_("Bad \"%s\" query filter format. " "Use ISO 8601 DateTime notation.") % k) raise exception.InvalidParameterValue(msg) comparison = utils.evaluate_filter_op(attr_value, operator, threshold) image_conditions.append(comparison) elif k in ['name', 'id', 'status', 'container_format', 'disk_format']: attr_value = getattr(models.Image, key) operator, list_value = utils.split_filter_op(filters.pop(k)) if operator == 'in': threshold = utils.split_filter_value_for_quotes(list_value) comparison = attr_value.in_(threshold) image_conditions.append(comparison) elif operator == 'eq': image_conditions.append(attr_value == list_value) else: msg = (_("Unable to filter by unknown operator '%s'.") % operator) raise exception.InvalidFilterOperatorValue(msg) for (k, value) in filters.items(): if hasattr(models.Image, k): image_conditions.append(getattr(models.Image, k) == value) else: prop_filters = _make_image_property_condition(key=k, value=value) prop_conditions.append(prop_filters) return image_conditions, prop_conditions, tag_conditions def _make_image_property_condition(key, value): alias = sa_orm.aliased(models.ImageProperty) prop_filters = [alias.deleted == False] prop_filters.extend([alias.name == key]) prop_filters.extend([alias.value == value]) return alias, prop_filters def _select_images_query(context, session, image_conditions, admin_as_user, member_status, visibility): img_conditional_clause = sa_sql.and_(True, *image_conditions) regular_user = (not context.is_admin) or admin_as_user query_member = session.query(models.Image).join( models.Image.members).filter(img_conditional_clause) if regular_user: member_filters = [models.ImageMember.deleted == False] member_filters.extend([models.Image.visibility == 'shared']) if context.owner is not None: member_filters.extend([models.ImageMember.member == context.owner]) if member_status != 'all': member_filters.extend([ models.ImageMember.status == member_status]) query_member = query_member.filter(sa_sql.and_(*member_filters)) query_image = session.query(models.Image).filter(img_conditional_clause) if regular_user: visibility_filters = [ models.Image.visibility == 'public', models.Image.visibility == 'community', ] query_image = query_image.filter(sa_sql.or_(*visibility_filters)) query_image_owner = None if context.owner is not None: query_image_owner = session.query(models.Image).filter( models.Image.owner == context.owner).filter( img_conditional_clause) if query_image_owner is not None: query = query_image.union(query_image_owner, query_member) else: query = query_image.union(query_member) return query else: # Admin user return query_image def image_get_all( context, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False, v1_mode=False, ): """ Get all images that match zero or
more filters. :param filters: dict of filter keys and values. If a 'properties' key is present, it is treated as a dict of key/value filters on the image properties attribute :param marker: image id after which to start page :param limit: maximum number of images to return :param sort_key: list of image attributes by which results should be sorted :param sort_dir: directions in which results should be sorted (asc, desc) :param member_status: only return shared images that have this membership status :param is_public: If true, return only public images. If false, return only private and shared images. :param admin_as_user: For backwards compatibility. If true, then return to an admin the equivalent set of images which it would see if it were a regular user :param return_tag: Indicates whether each image entry in the result includes its relevant tag entries. This can improve upper-layer query performance by avoiding separate calls to fetch tags :param v1_mode: If true, mutates the 'visibility' value of each image into the v1-compatible field 'is_public' """ session = get_session() with session.begin(): return _image_get_all( context, session, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, member_status=member_status, is_public=is_public, admin_as_user=admin_as_user, return_tag=return_tag, v1_mode=v1_mode) def _image_get_all( context, session, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False, v1_mode=False, ): sort_key = ['created_at'] if not sort_key else sort_key default_sort_dir = 'desc' if not sort_dir: sort_dir = [default_sort_dir] * len(sort_key) elif len(sort_dir) == 1: default_sort_dir = sort_dir[0] sort_dir *= len(sort_key) filters = filters or {} visibility = filters.pop('visibility', None) showing_deleted = 'changes-since' in filters or filters.get('deleted', False) img_cond, prop_cond, tag_cond = _make_conditions_from_filters( filters, is_public) query = _select_images_query(context, session, img_cond, admin_as_user, member_status, visibility) if visibility is not None: # with a visibility, we always and only include images with that # visibility except when using the 'all' visibility if visibility != 'all': query = query.filter(models.Image.visibility == visibility) elif context.owner is None: # without either a visibility or an owner, we never include # 'community' images query = query.filter(models.Image.visibility != 'community') else: # without a visibility and with an owner, we only want to include # 'community' images if and only if they are owned by this owner community_filters = [ models.Image.owner == context.owner, models.Image.visibility != 'community', ] query = query.filter(sa_sql.or_(*community_filters)) if prop_cond: for alias, prop_condition in prop_cond: query = query.join(alias).filter(sa_sql.and_(*prop_condition)) if tag_cond: for alias, tag_condition in tag_cond: query = query.join(alias).filter(sa_sql.and_(*tag_condition)) marker_image = None if marker is not None: marker_image = _image_get(context, session, marker, force_show_deleted=showing_deleted) for key in ['created_at', 'id']: if key not in sort_key: sort_key.append(key) sort_dir.append(default_sort_dir) query = _paginate_query(query, models.Image, limit, sort_key, marker=marker_image, sort_dir=None, sort_dirs=sort_dir) query = query.options(sa_orm.joinedload( models.Image.properties)).options( sa_orm.joinedload(models.Image.locations)) if return_tag: query =
query.options(sa_orm.joinedload(models.Image.tags)) images = [] for image in query.all(): image_dict = image.to_dict() image_dict = _normalize_locations( context, image_dict, force_show_deleted=showing_deleted) if return_tag: image_dict = _normalize_tags(image_dict) if v1_mode: image_dict = db_utils.mutate_image_dict_to_v1(image_dict) images.append(image_dict) return images def _drop_protected_attrs(model_class, values): """ Remove protected attributes from the values dictionary using the model's __protected_attributes__ field. """ for attr in model_class.__protected_attributes__: if attr in values: del values[attr] def _image_get_disk_usage_by_owner(context, session, owner, image_id=None): query = session.query(models.Image) query = query.filter(models.Image.owner == owner) if image_id is not None: query = query.filter(models.Image.id != image_id) query = query.filter(models.Image.size > 0) query = query.filter(~models.Image.status.in_(['killed', 'deleted'])) images = query.all() total = 0 for i in images: locations = [location for location in i.locations if location['status'] != 'deleted'] total += (i.size * len(locations)) return total def _image_get_staging_usage_by_owner(context, session, owner): # NOTE(danms): We could do this in a single query, but I think it is # easier to understand as two that we concatenate while generating # results. # Images in uploading or importing state are consuming staging # space. query = session.query(models.Image) query = query.filter(models.Image.owner == owner) query = query.filter(models.Image.size > 0) query = query.filter(models.Image.status.in_(('uploading', 'importing'))) importing_images = query.all() # Images with non-empty os_glance_importing_to_stores properties # may also be consuming staging space. Filter out deleted images # and the importing ones already included in the above query. props = session.query(models.ImageProperty).filter( models.ImageProperty.name == 'os_glance_importing_to_stores', models.ImageProperty.value != '').subquery() query = session.query(models.Image) query = query.join(props, props.c.image_id == models.Image.id) query = query.filter(models.Image.owner == owner) query = query.filter(models.Image.size > 0) query = query.filter(~models.Image.status.in_(('uploading', 'importing', 'killed', 'deleted'))) copying_images = query.all() return sum(i.size for i in itertools.chain(importing_images, copying_images)) def _image_get_count_by_owner(context, session, owner): query = session.query(models.Image) query = query.filter(models.Image.owner == owner) query = query.filter(~models.Image.status.in_(['killed', 'deleted'])) return query.count() def _image_get_uploading_count_by_owner(context, session, owner): """Return a count of the images uploading or importing.""" importing_statuses = ('saving', 'uploading', 'importing') # Images in any state indicating uploading, including through image_upload # or importing count for this. query = session.query(models.Image) query = query.filter(models.Image.owner == owner) query = query.filter(models.Image.status.in_(importing_statuses)) uploading = query.count() # Images that are not in the above list, but are not deleted and # in the process of doing a copy count for this.
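# As a sketch (exact SQL depends on the dialect), the subquery built next corresponds roughly to: SELECT image_id FROM image_properties WHERE name = 'os_glance_importing_to_stores' AND value != '' -- which is then joined against images to count this owner's in-flight copies.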
props = session.query(models.ImageProperty).filter( models.ImageProperty.name == 'os_glance_importing_to_stores', models.ImageProperty.value != '').subquery() query = session.query(models.Image) query = query.join(props, props.c.image_id == models.Image.id) query = query.filter(models.Image.owner == owner) query = query.filter(~models.Image.status.in_(importing_statuses + ('killed', 'deleted'))) copying = query.count() return uploading + copying def _validate_image(values, mandatory_status=True): """ Validates the incoming data and raises an Invalid exception if anything is out of order. :param values: Mapping of image metadata to check :param mandatory_status: Whether to validate status from values """ if mandatory_status: status = values.get('status') if not status: msg = "Image status is required." raise exception.Invalid(msg) if status not in STATUSES: msg = "Invalid image status '%s' for image." % status raise exception.Invalid(msg) # validate integer values to eliminate DBError on save _validate_db_int(min_disk=values.get('min_disk'), min_ram=values.get('min_ram')) return values def _update_values(image_ref, values): for k in values: if getattr(image_ref, k) != values[k]: setattr(image_ref, k, values[k]) def image_set_property_atomic(image_id, name, value): """ Atomically set an image property to a value. This will only succeed if the property does not currently exist, i.e. if this call is the one that created it. This can be used by multiple competing threads to ensure that only one of those threads succeeds in creating the property. Note that ImageProperty objects are marked as deleted=$id and so we must first try to atomically update-and-undelete such a property, if it exists. If that does not work, we should try to create the property. The latter should fail with DBDuplicateEntry because of the UniqueConstraint across ImageProperty(image_id, name). :param image_id: The ID of the image on which to create the property :param name: The property name :param value: The value to set for the property :raises Duplicate: If the property already exists """ session = get_session() with session.begin(): connection = session.connection() table = models.ImageProperty.__table__ # This should be: # UPDATE image_properties SET value=$value, deleted=False # WHERE name=$name AND deleted!=False result = connection.execute(table.update().where( sa_sql.and_(table.c.name == name, table.c.image_id == image_id, table.c.deleted != False)).values( value=value, deleted=False)) if result.rowcount == 1: # Found and updated a deleted property, so we win return # There might have been no deleted property, or the property # exists and is undeleted, so try to create it and use that # to determine if we've lost the race or not. try: connection.execute(table.insert(), dict(deleted=False, created_at=timeutils.utcnow(), image_id=image_id, name=name, value=value)) except db_exception.DBDuplicateEntry: # Lost the race to create the new property raise exception.Duplicate() # If we got here, we created a new row, UniqueConstraint would have # caused us to fail if we lost the race def image_delete_property_atomic(image_id, name, value): """ Atomically delete an image property. This will only succeed if the referenced image has a property set to exactly the value provided.
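Illustrative call (the property name and value shown here are hypothetical): image_delete_property_atomic(image_id, 'os_glance_import_task', task_id)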
:param image_id: The ID of the image on which to delete the property :param name: The property name :param value: The value the property is expected to be set to :raises NotFound: If the property does not exist """ session = get_session() with session.begin(): connection = session.connection() table = models.ImageProperty.__table__ result = connection.execute(table.delete().where( sa_sql.and_(table.c.name == name, table.c.value == value, table.c.image_id == image_id, table.c.deleted == False))) if result.rowcount == 1: return raise exception.NotFound() @utils.no_4byte_params def _image_update(context, session, image_id, values, purge_props=False, from_state=None, atomic_props=None): """ Used internally by image_create and image_update :param context: Request context :param image_id: If None, create the image, otherwise, find and update it :param values: A dict of attributes to set :param from_state: If not None, require the image to be in this state to do the update :param purge_props: If True, delete properties found in the database but not present in values :param atomic_props: If non-None, refuse to create or update properties in this list """ # NOTE(jbresnah) values is altered in this method so a copy is needed values = values.copy() # Remove the properties passed in the values mapping. We # handle properties separately from base image attributes, # and leaving properties in the values mapping will cause # a SQLAlchemy model error because SQLAlchemy expects the # properties attribute of an Image model to be a list and # not a dict. properties = values.pop('properties', {}) location_data = values.pop('locations', None) new_status = values.get('status') if image_id: image_ref = _image_get(context, session, image_id) current = image_ref.status # Perform authorization check _check_mutate_authorization(context, image_ref) else: if values.get('size') is not None: values['size'] = int(values['size']) if 'min_ram' in values: values['min_ram'] = int(values['min_ram'] or 0) if 'min_disk' in values: values['min_disk'] = int(values['min_disk'] or 0) values['protected'] = bool(values.get('protected', False)) image_ref = models.Image() values = db_utils.ensure_image_dict_v2_compliant(values) # Need to canonicalize ownership if 'owner' in values and not values['owner']: values['owner'] = None if image_id: # Don't drop created_at if we're passing it in... _drop_protected_attrs(models.Image, values) # NOTE(iccha-sethi): updated_at must be explicitly set in case # only ImageProperty table was modified values['updated_at'] = timeutils.utcnow() if image_id: query = session.query(models.Image).filter_by(id=image_id) if from_state: query = query.filter_by(status=from_state) mandatory_status = True if new_status else False _validate_image(values, mandatory_status=mandatory_status) # Validate fields for Images table. This is similar to what is done # for the query result update except that we need to do it prior # in this case. values = {key: values[key] for key in values if key in image_ref.to_dict()} updated = query.update(values, synchronize_session='fetch') if not updated: msg = (_('cannot transition from %(current)s to ' '%(next)s in update (wanted ' 'from_state=%(from)s)') % {'current': current, 'next': new_status, 'from': from_state}) raise exception.Conflict(msg) image_ref = _image_get(context, session, image_id) else: image_ref.update(values) # Validate the attributes before we go any further.
From my # investigation, the @validates decorator does not validate # on new records, only on existing records, which is, well, # idiotic. values = _validate_image(image_ref.to_dict()) _update_values(image_ref, values) try: image_ref.save(session=session) except db_exception.DBDuplicateEntry: raise exception.Duplicate("Image ID %s already exists!" % values['id']) _set_properties_for_image( context, session, image_ref, properties, purge_props, atomic_props) if location_data: _image_locations_set( context, session, image_ref.id, location_data) return image_ref def image_location_add(context, image_id, location): session = get_session() with session.begin(): _image_location_add(context, session, image_id, location) @utils.no_4byte_params def _image_location_add(context, session, image_id, location): deleted = location['status'] in ('deleted', 'pending_delete') delete_time = timeutils.utcnow() if deleted else None location_ref = models.ImageLocation(image_id=image_id, value=location['url'], meta_data=location['metadata'], status=location['status'], deleted=deleted, deleted_at=delete_time) location_ref.save(session=session) def image_location_update(context, image_id, location): session = get_session() with session.begin(): _image_location_update(context, session, image_id, location) @utils.no_4byte_params def _image_location_update(context, session, image_id, location): loc_id = location.get('id') if loc_id is None: msg = _("The location data has an invalid ID: %s") % loc_id raise exception.Invalid(msg) try: location_ref = session.query(models.ImageLocation).filter_by( id=loc_id).filter_by(image_id=image_id).one() deleted = location['status'] in ('deleted', 'pending_delete') updated_time = timeutils.utcnow() delete_time = updated_time if deleted else None location_ref.update({"value": location['url'], "meta_data": location['metadata'], "status": location['status'], "deleted": deleted, "updated_at": updated_time, "deleted_at": delete_time}) location_ref.save(session=session) except sa_orm.exc.NoResultFound: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=loc_id, img=image_id)) LOG.warning(msg) raise exception.NotFound(msg) def image_location_delete(context, image_id, location_id, status, delete_time=None): session = get_session() with session.begin(): _image_location_delete( context, session, image_id, location_id, status, delete_time=delete_time, ) def _image_location_delete( context, session, image_id, location_id, status, delete_time=None, ): if status not in ('deleted', 'pending_delete'): msg = _("The status of deleted image location can only be set to " "'pending_delete' or 'deleted'") raise exception.Invalid(msg) try: location_ref = session.query(models.ImageLocation).filter_by( id=location_id).filter_by(image_id=image_id).one() delete_time = delete_time or timeutils.utcnow() location_ref.update({"deleted": True, "status": status, "updated_at": delete_time, "deleted_at": delete_time}) location_ref.save(session=session) except sa_orm.exc.NoResultFound: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=location_id, img=image_id)) LOG.warning(msg) raise exception.NotFound(msg) def _image_locations_set(context, session, image_id, locations): # NOTE(zhiyan): 1.
Remove records from DB for deleted locations query = session.query(models.ImageLocation).filter_by( image_id=image_id).filter_by(deleted=False) loc_ids = [loc['id'] for loc in locations if loc.get('id')] if loc_ids: query = query.filter(~models.ImageLocation.id.in_(loc_ids)) for loc_id in [loc_ref.id for loc_ref in query.all()]: _image_location_delete(context, session, image_id, loc_id, 'deleted') # NOTE(zhiyan): 2. Add or update locations for loc in locations: if loc.get('id') is None: _image_location_add(context, session, image_id, loc) else: _image_location_update(context, session, image_id, loc) def _image_locations_delete_all( context, session, image_id, delete_time=None, ): """Delete all image locations for given image""" location_refs = session.query(models.ImageLocation).filter_by( image_id=image_id).filter_by(deleted=False).all() for loc_id in [loc_ref.id for loc_ref in location_refs]: _image_location_delete(context, session, image_id, loc_id, 'deleted', delete_time=delete_time) @utils.no_4byte_params def _set_properties_for_image(context, session, image_ref, properties, purge_props=False, atomic_props=None): """ Create or update a set of image_properties for a given image :param context: Request context :param session: A SQLAlchemy session to use :param image_ref: An Image object :param properties: A dict of properties to set :param purge_props: If True, delete properties in the database that are not in properties :param atomic_props: If non-None, skip update/create/delete of properties named in this list """ if atomic_props is None: atomic_props = [] orig_properties = {} for prop_ref in image_ref.properties: orig_properties[prop_ref.name] = prop_ref for name, value in properties.items(): prop_values = {'image_id': image_ref.id, 'name': name, 'value': value} if name in atomic_props: # NOTE(danms): Never update or create properties in the list # of atomics continue elif name in orig_properties: prop_ref = orig_properties[name] _image_property_update(context, session, prop_ref, prop_values) else: _image_property_create(context, session, prop_values) if purge_props: for key in orig_properties.keys(): if key in atomic_props: continue elif key not in properties: prop_ref = orig_properties[key] _image_property_delete(context, session, prop_ref.name, image_ref.id) def _image_child_entry_delete_all( context, session, child_model_cls, image_id, delete_time=None, ): """Deletes all the child entries for the given image id. Deletes all the child entries of the given child entry ORM model class using the parent image's id. The child entry ORM model class can be one of the following: model.ImageLocation, model.ImageProperty, model.ImageMember and model.ImageTag. :param context: Request context :param session: A SQLAlchemy session to use :param child_model_cls: the ORM model class. :param image_id: id of the image whose child entries are to be deleted. :param delete_time: datetime of deletion to be set. If None, uses current datetime. :rtype: int :returns: The number of child entries that got soft-deleted.
""" query = session.query(child_model_cls).filter_by( image_id=image_id).filter_by(deleted=False) delete_time = delete_time or timeutils.utcnow() count = query.update({"deleted": True, "deleted_at": delete_time}) return count def image_property_create(context, values): """Create an ImageProperty object.""" session = get_session() with session.begin(): return _image_property_create(context, session, values) def _image_property_create(context, session, values): prop_ref = models.ImageProperty() prop = _image_property_update(context, session, prop_ref, values) return prop.to_dict() def _image_property_update(context, session, prop_ref, values): """ Used internally by image_property_create and image_property_update. """ _drop_protected_attrs(models.ImageProperty, values) values["deleted"] = False prop_ref.update(values) prop_ref.save(session=session) return prop_ref def image_property_delete(context, prop_ref, image_ref): session = get_session() with session.begin(): return _image_property_delete(context, session, prop_ref, image_ref) def _image_property_delete(context, session, prop_ref, image_ref): """ Used internally by _set_properties_for_image(). """ prop = session.query(models.ImageProperty).filter_by(image_id=image_ref, name=prop_ref).one() try: prop.delete(session=session) except sa_orm.exc.StaleDataError as e: LOG.debug(('StaleDataError while deleting property %(prop)r ' 'from image %(image)r likely means we raced during delete: ' '%(err)s'), {'prop': prop_ref, 'image': image_ref, 'err': str(e)}) return return prop def _image_property_delete_all(context, session, image_id, delete_time=None): """Delete all image properties for given image""" props_updated_count = _image_child_entry_delete_all( context, session, models.ImageProperty, image_id, delete_time) return props_updated_count @utils.no_4byte_params def image_member_create(context, values): """Create an ImageMember object.""" session = get_session() with session.begin(): memb_ref = models.ImageMember() _image_member_update(context, session, memb_ref, values) return _image_member_format(memb_ref) def _image_member_format(member_ref): """Format a member ref for consumption outside of this module.""" return { 'id': member_ref['id'], 'image_id': member_ref['image_id'], 'member': member_ref['member'], 'can_share': member_ref['can_share'], 'status': member_ref['status'], 'created_at': member_ref['created_at'], 'updated_at': member_ref['updated_at'], 'deleted': member_ref['deleted'] } def image_member_update(context, memb_id, values): """Update an ImageMember object.""" session = get_session() with session.begin(): memb_ref = _image_member_get(context, session, memb_id) _image_member_update(context, session, memb_ref, values) return _image_member_format(memb_ref) def _image_member_update(context, session, memb_ref, values): """Apply supplied dictionary of values to a Member object.""" _drop_protected_attrs(models.ImageMember, values) values["deleted"] = False values.setdefault('can_share', False) memb_ref.update(values) memb_ref.save(session=session) return memb_ref def image_member_delete(context, memb_id): """Delete an ImageMember object.""" session = get_session() with session.begin(): member_ref = _image_member_get(context, session, memb_id) _image_member_delete(context, session, member_ref) def _image_member_delete(context, session, memb_ref): memb_ref.delete(session=session) def _image_member_delete_all(context, session, image_id, delete_time=None): """Delete all image members for given image""" members_updated_count = 
_image_child_entry_delete_all( context, session, models.ImageMember, image_id, delete_time) return members_updated_count def _image_member_get(context, session, memb_id): """Fetch an ImageMember entity by id.""" query = session.query(models.ImageMember) query = query.filter_by(id=memb_id) return query.one() def image_member_find(context, image_id=None, member=None, status=None, include_deleted=False): """Find all members that meet the given criteria. Note: currently include_deleted should be True only when creating a new image membership, as there may be a deleted image membership between the same image and tenant; the membership will be reused in this case. It should be False in other cases. :param image_id: identifier of image entity :param member: tenant to which membership has been granted :param include_deleted: A boolean indicating whether the result should include deleted image member records """ session = get_session() with session.begin(): members = _image_member_find(context, session, image_id, member, status, include_deleted) return [_image_member_format(m) for m in members] def _image_member_find(context, session, image_id=None, member=None, status=None, include_deleted=False): query = session.query(models.ImageMember) if not include_deleted: query = query.filter_by(deleted=False) if not context.is_admin: query = query.join(models.Image) filters = [ models.Image.owner == context.owner, models.ImageMember.member == context.owner, ] query = query.filter(sa_sql.or_(*filters)) if image_id is not None: query = query.filter(models.ImageMember.image_id == image_id) if member is not None: query = query.filter(models.ImageMember.member == member) if status is not None: query = query.filter(models.ImageMember.status == status) return query.all() def image_member_count(context, image_id): """Return the number of image members for this image :param image_id: identifier of image entity """ session = get_session() if not image_id: msg = _("Image id is required.") raise exception.Invalid(msg) with session.begin(): query = session.query(models.ImageMember) query = query.filter_by(deleted=False) query = query.filter(models.ImageMember.image_id == str(image_id)) return query.count() def image_tag_set_all(context, image_id, tags): # NOTE(kragniz): tag ordering should match exactly what was provided, so a # subsequent call to image_tag_get_all returns them in the correct order session = get_session() with session.begin(): existing_tags = _image_tag_get_all(context, session, image_id) tags_created = [] for tag in tags: if tag not in tags_created and tag not in existing_tags: tags_created.append(tag) _image_tag_create(context, session, image_id, tag) for tag in existing_tags: if tag not in tags: _image_tag_delete(context, session, image_id, tag) def image_tag_create(context, image_id, value): session = get_session() with session.begin(): return _image_tag_create(context, session, image_id, value) @utils.no_4byte_params def _image_tag_create(context, session, image_id, value): """Create an image tag.""" tag_ref = models.ImageTag(image_id=image_id, value=value) tag_ref.save(session=session) return tag_ref['value'] def image_tag_delete(context, image_id, value): session = get_session() with session.begin(): _image_tag_delete(context, session, image_id, value) def _image_tag_delete(context, session, image_id, value): """Delete an image tag.""" _check_image_id(image_id) query = session.query(models.ImageTag).filter_by( image_id=image_id).filter_by( value=value).filter_by(deleted=False) try: tag_ref =
query.one() except sa_orm.exc.NoResultFound: raise exception.NotFound() tag_ref.delete(session=session) def _image_tag_delete_all(context, session, image_id, delete_time=None): """Delete all image tags for given image""" tags_updated_count = _image_child_entry_delete_all( context, session, models.ImageTag, image_id, delete_time) return tags_updated_count def image_tag_get_all(context, image_id): session = get_session() with session.begin(): return _image_tag_get_all(context, session, image_id) def _image_tag_get_all(context, session, image_id): """Get a list of tags for a specific image.""" _check_image_id(image_id) tags = session.query(models.ImageTag.value).filter_by( image_id=image_id).filter_by(deleted=False).all() return [tag[0] for tag in tags] class DeleteFromSelect(sa_sql.expression.UpdateBase): inherit_cache = False def __init__(self, table, select, column): self.table = table self.select = select self.column = column # NOTE(abhishekk): MySQL doesn't yet support subqueries with # 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested select. @compiles(DeleteFromSelect) def visit_delete_from_select(element, compiler, **kw): return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % ( compiler.process(element.table, asfrom=True), compiler.process(element.column), element.column.name, compiler.process(element.select)) def purge_deleted_rows(context, age_in_days, max_rows): """Purges soft deleted rows. Deletes rows of the images and tasks tables and all dependent tables according to the given age for the relevant models. """ # check max_rows for its maximum limit _validate_db_int(max_rows=max_rows) session = get_session() metadata = MetaData() engine = get_engine() deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days) tables = [] for model_class in models.__dict__.values(): if not hasattr(model_class, '__tablename__'): continue if hasattr(model_class, 'deleted'): tables.append(model_class.__tablename__) # First force purging of records that are not soft deleted but # are referencing soft deleted tasks/images records (e.g. task_info # records). Then purge all soft deleted records in glance tables in the # right order to avoid FK constraint violation. t = Table("tasks", metadata, autoload_with=engine) ti = Table("task_info", metadata, autoload_with=engine) joined_rec = ti.join(t, t.c.id == ti.c.task_id) deleted_task_info = sql.\ select(ti.c.task_id).where(t.c.deleted_at < deleted_age).\ select_from(joined_rec).\ order_by(t.c.deleted_at) if max_rows != -1: deleted_task_info = deleted_task_info.limit(max_rows) delete_statement = DeleteFromSelect(ti, deleted_task_info, ti.c.task_id) LOG.info(_LI('Purging deleted rows older than %(age_in_days)d day(s) ' 'from table %(tbl)s'), {'age_in_days': age_in_days, 'tbl': ti}) try: with session.begin(): result = session.execute(delete_statement) except (db_exception.DBError, db_exception.DBReferenceError) as ex: LOG.exception(_LE('DBError detected when force purging ' 'table=%(table)s: %(error)s'), {'table': ti, 'error': str(ex)}) raise rows = result.rowcount LOG.info(_LI('Deleted %(rows)d row(s) from table %(tbl)s'), {'rows': rows, 'tbl': ti}) # get rid of FK constraints for tbl in ('images', 'tasks'): try: tables.remove(tbl) except ValueError: LOG.warning(_LW('Expected table %(tbl)s was not found in DB.'), {'tbl': tbl}) else: # NOTE(abhishekk): To mitigate OSSN-0075, image records should be # purged with the new ``purge-images-table`` command.
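# A sketch of that invocation (option names assumed from glance-manage conventions; verify with ``glance-manage db purge_images_table --help`` on your release): glance-manage db purge_images_table --age_in_days 60 --max_rows 1000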
if tbl == 'images': continue tables.append(tbl) for tbl in tables: tab = Table(tbl, metadata, autoload_with=engine) LOG.info( _LI('Purging deleted rows older than %(age_in_days)d day(s) ' 'from table %(tbl)s'), {'age_in_days': age_in_days, 'tbl': tbl}) column = tab.c.id deleted_at_column = tab.c.deleted_at query_delete = sql.select(column).\ where(deleted_at_column < deleted_age).\ order_by(deleted_at_column) if max_rows != -1: query_delete = query_delete.limit(max_rows) delete_statement = DeleteFromSelect(tab, query_delete, column) try: with session.begin(): result = session.execute(delete_statement) except db_exception.DBReferenceError as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('DBError detected when purging from ' "%(tablename)s: %(error)s"), {'tablename': tbl, 'error': str(ex)}) rows = result.rowcount LOG.info(_LI('Deleted %(rows)d row(s) from table %(tbl)s'), {'rows': rows, 'tbl': tbl}) def purge_deleted_rows_from_images(context, age_in_days, max_rows): """Purges soft deleted rows. Deletes rows of the images table according to the given age for the relevant models. """ # check max_rows for its maximum limit _validate_db_int(max_rows=max_rows) session = get_session() metadata = MetaData() engine = get_engine() deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days) tbl = 'images' tab = Table(tbl, metadata, autoload_with=engine) LOG.info( _LI('Purging deleted rows older than %(age_in_days)d day(s) ' 'from table %(tbl)s'), {'age_in_days': age_in_days, 'tbl': tbl}) column = tab.c.id deleted_at_column = tab.c.deleted_at query_delete = sql.\ select(column).\ where(deleted_at_column < deleted_age).\ order_by(deleted_at_column) if max_rows != -1: query_delete = query_delete.limit(max_rows) delete_statement = DeleteFromSelect(tab, query_delete, column) try: with session.begin(): result = session.execute(delete_statement) except db_exception.DBReferenceError as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('DBError detected when purging from ' "%(tablename)s: %(error)s"), {'tablename': tbl, 'error': str(ex)}) rows = result.rowcount LOG.info(_LI('Deleted %(rows)d row(s) from table %(tbl)s'), {'rows': rows, 'tbl': tbl}) def user_get_storage_usage(context, owner_id, image_id=None): _check_image_id(image_id) session = get_session() with session.begin(): total_size = _image_get_disk_usage_by_owner( context, session, owner_id, image_id=image_id) return total_size def user_get_staging_usage(context, owner_id): session = get_session() with session.begin(): return _image_get_staging_usage_by_owner(context, session, owner_id) def user_get_image_count(context, owner_id): session = get_session() with session.begin(): return _image_get_count_by_owner(context, session, owner_id) def user_get_uploading_count(context, owner_id): session = get_session() with session.begin(): return _image_get_uploading_count_by_owner(context, session, owner_id) def _task_info_format(task_info_ref): """Format a task info ref for consumption outside of this module""" if task_info_ref is None: return {} return { 'task_id': task_info_ref['task_id'], 'input': task_info_ref['input'], 'result': task_info_ref['result'], 'message': task_info_ref['message'], } def _task_info_create(context, session, task_id, values): """Create a TaskInfo object""" task_info_ref = models.TaskInfo() task_info_ref.task_id = task_id task_info_ref.update(values) task_info_ref.save(session=session) return _task_info_format(task_info_ref) def _task_info_update(context, session, task_id, values): """Update a
TaskInfo object""" task_info_ref = _task_info_get(context, session, task_id) if task_info_ref: task_info_ref.update(values) task_info_ref.save(session=session) return _task_info_format(task_info_ref) def _task_info_get(context, session, task_id): """Fetch an TaskInfo entity by task_id""" query = session.query(models.TaskInfo) query = query.filter_by(task_id=task_id) try: task_info_ref = query.one() except sa_orm.exc.NoResultFound: LOG.debug("TaskInfo was not found for task with id %(task_id)s", {'task_id': task_id}) task_info_ref = None return task_info_ref def task_create(context, values): """Create a task object""" values = values.copy() session = get_session() with session.begin(): task_info_values = _pop_task_info_values(values) task_ref = models.Task() _task_update(context, session, task_ref, values) _task_info_create(context, session, task_ref.id, task_info_values) with session.begin(): task_ref = _task_get(context, session, task_ref.id) return _task_format(task_ref, task_ref.info) def _pop_task_info_values(values): task_info_values = {} for k, v in list(values.items()): if k in ['input', 'result', 'message']: values.pop(k) task_info_values[k] = v return task_info_values def task_update(context, task_id, values): """Update a task object""" session = get_session() with session.begin(): task_info_values = _pop_task_info_values(values) task_ref = _task_get(context, session, task_id) _drop_protected_attrs(models.Task, values) values['updated_at'] = timeutils.utcnow() _task_update(context, session, task_ref, values) if task_info_values: _task_info_update(context, session, task_id, task_info_values) with session.begin(): task_ref = _task_get(context, session, task_id) return _task_format(task_ref, task_ref.info) def task_get(context, task_id, force_show_deleted=False): """Fetch a task entity by id""" session = get_session() with session.begin(): task_ref = _task_get(context, session, task_id, force_show_deleted=force_show_deleted) return _task_format(task_ref, task_ref.info) def tasks_get_by_image(context, image_id): """Fetch all tasks associated with image_id""" session = get_session() with session.begin(): return _tasks_get_by_image(context, session, image_id) def _tasks_get_by_image(context, session, image_id): tasks = [] _task_soft_delete(context, session) query = session.query(models.Task).options( sa_orm.joinedload(models.Task.info) ).filter_by(image_id=image_id) expires_at = models.Task.expires_at query = query.filter(sa_sql.or_(expires_at == None, expires_at >= timeutils.utcnow())) updated_at = models.Task.updated_at query.filter( updated_at <= (timeutils.utcnow() + datetime.timedelta(hours=CONF.task.task_time_to_live))) if not context.can_see_deleted: query = query.filter_by(deleted=False) try: task_refs = query.all() except sa_orm.exc.NoResultFound: LOG.debug("No task found for image with ID %s", image_id) return tasks for task_ref in task_refs: # Make sure the task is visible if not _is_task_visible(context, task_ref): msg = "Task %s is not visible, excluding" % task_ref.id LOG.debug(msg) continue tasks.append(_task_format(task_ref, task_ref.info)) return tasks def task_delete(context, task_id): """Delete a task""" session = get_session() with session.begin(): task_ref = _task_get(context, session, task_id) task_ref.delete(session=session) return _task_format(task_ref, task_ref.info) def _task_soft_delete(context, session): """Scrub task entities which are expired """ expires_at = models.Task.expires_at query = session.query(models.Task) query = (query.filter(models.Task.owner 
== context.owner) .filter_by(deleted=False) .filter(expires_at <= timeutils.utcnow())) values = {'deleted': True, 'deleted_at': timeutils.utcnow()} query.update(values) def task_get_all(context, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc', admin_as_user=False): """ Get all tasks that match zero or more filters. :param filters: dict of filter keys and values. :param marker: task id after which to start page :param limit: maximum number of tasks to return :param sort_key: task attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param admin_as_user: For backwards compatibility. If true, then return to an admin the equivalent set of tasks which it would see if it were a regular user :returns: tasks set """ session = get_session() with session.begin(): return _task_get_all( context, session, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, admin_as_user=admin_as_user, ) def _task_get_all( context, session, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc', admin_as_user=False, ): filters = filters or {} query = session.query(models.Task) if not (context.is_admin or admin_as_user) and context.owner is not None: query = query.filter(models.Task.owner == context.owner) _task_soft_delete(context, session) showing_deleted = False if 'deleted' in filters: deleted_filter = filters.pop('deleted') query = query.filter_by(deleted=deleted_filter) showing_deleted = deleted_filter for (k, v) in filters.items(): if v is not None: key = k if hasattr(models.Task, key): query = query.filter(getattr(models.Task, key) == v) marker_task = None if marker is not None: marker_task = _task_get(context, session, marker, force_show_deleted=showing_deleted) sort_keys = ['created_at', 'id'] if sort_key not in sort_keys: sort_keys.insert(0, sort_key) query = _paginate_query(query, models.Task, limit, sort_keys, marker=marker_task, sort_dir=sort_dir) task_refs = query.all() tasks = [] for task_ref in task_refs: tasks.append(_task_format(task_ref, task_info_ref=None)) return tasks def _is_task_visible(context, task): """Return True if the task is visible in this context.""" # Is admin == task visible if context.is_admin: return True # No owner == task visible if task['owner'] is None: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == task['owner']: return True return False def _task_get(context, session, task_id, force_show_deleted=False): """Fetch a task entity by id""" query = session.query(models.Task).options( sa_orm.joinedload(models.Task.info) ).filter_by(id=task_id) if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) try: task_ref = query.one() except sa_orm.exc.NoResultFound: LOG.debug("No task found with ID %s", task_id) raise exception.TaskNotFound(task_id=task_id) # Make sure the task is visible if not _is_task_visible(context, task_ref): msg = "Forbidding request, task %s is not visible" % task_id LOG.debug(msg) raise exception.Forbidden(msg) return task_ref def _task_update(context, session, task_ref, values): """Apply supplied dictionary of values to a task object.""" if 'deleted' not in values: values["deleted"] = False task_ref.update(values) task_ref.save(session=session) return task_ref def _task_format(task_ref, task_info_ref=None): """Format a task ref for consumption outside of this module""" task_dict = { 'id': task_ref['id'], 'type': 
task_ref['type'], 'status': task_ref['status'], 'owner': task_ref['owner'], 'expires_at': task_ref['expires_at'], 'created_at': task_ref['created_at'], 'updated_at': task_ref['updated_at'], 'deleted_at': task_ref['deleted_at'], 'deleted': task_ref['deleted'], 'image_id': task_ref['image_id'], 'request_id': task_ref['request_id'], 'user_id': task_ref['user_id'], } if task_info_ref: task_info_dict = { 'input': task_info_ref['input'], 'result': task_info_ref['result'], 'message': task_info_ref['message'], } task_dict.update(task_info_dict) return task_dict def metadef_namespace_get_all( context, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None, ): """List all available namespaces.""" session = get_session() with session.begin(): return metadef_namespace_api.get_all( context, session, marker, limit, sort_key, sort_dir, filters) def metadef_namespace_get(context, namespace_name): """Get a namespace or raise if it does not exist or is not visible.""" session = get_session() with session.begin(): return metadef_namespace_api.get( context, session, namespace_name) @utils.no_4byte_params def metadef_namespace_create(context, values): """Create a namespace or raise if it already exists.""" session = get_session() with session.begin(): return metadef_namespace_api.create(context, session, values) @utils.no_4byte_params def metadef_namespace_update(context, namespace_id, namespace_dict): """Update a namespace or raise if it does not exist or is not visible""" session = get_session() with session.begin(): return metadef_namespace_api.update( context, session, namespace_id, namespace_dict) def metadef_namespace_delete(context, namespace_name): """Delete the namespace and all foreign references""" session = get_session() with session.begin(): return metadef_namespace_api.delete_cascade( context, session, namespace_name) def metadef_object_get_all(context, namespace_name): """Get all metadata-schema objects in a namespace or raise if it does not exist.""" session = get_session() with session.begin(): return metadef_object_api.get_all( context, session, namespace_name) def metadef_object_get(context, namespace_name, object_name): """Get a metadata-schema object or raise if it does not exist.""" session = get_session() with session.begin(): return metadef_object_api.get( context, session, namespace_name, object_name) @utils.no_4byte_params def metadef_object_create(context, namespace_name, object_dict): """Create a metadata-schema object or raise if it already exists.""" session = get_session() with session.begin(): return metadef_object_api.create( context, session, namespace_name, object_dict) @utils.no_4byte_params def metadef_object_update(context, namespace_name, object_id, object_dict): """Update an object or raise if it does not exist or is not visible.""" session = get_session() with session.begin(): return metadef_object_api.update( context, session, namespace_name, object_id, object_dict) def metadef_object_delete(context, namespace_name, object_name): """Delete an object or raise if namespace or object doesn't exist.""" session = get_session() with session.begin(): return metadef_object_api.delete( context, session, namespace_name, object_name) def metadef_object_delete_namespace_content(context, namespace_name): """Delete all objects in a namespace or raise if the namespace doesn't exist.""" session = get_session() with session.begin(): return metadef_object_api.delete_by_namespace_name( context, session, namespace_name) def metadef_object_count(context, namespace_name): """Get count of objects for
a namespace, raise if ns doesn't exist.""" session = get_session() with session.begin(): return metadef_object_api.count(context, session, namespace_name) def metadef_property_get_all(context, namespace_name): """Get a metadef property or raise if it does not exist.""" session = get_session() with session.begin(): return metadef_property_api.get_all(context, session, namespace_name) def metadef_property_get(context, namespace_name, property_name): """Get a metadef property or raise if it does not exist.""" session = get_session() with session.begin(): return metadef_property_api.get( context, session, namespace_name, property_name) @utils.no_4byte_params def metadef_property_create(context, namespace_name, property_dict): """Create a metadef property or raise if it already exists.""" session = get_session() with session.begin(): return metadef_property_api.create( context, session, namespace_name, property_dict) @utils.no_4byte_params def metadef_property_update(context, namespace_name, property_id, property_dict): """Update an object or raise if it does not exist or not visible.""" session = get_session() with session.begin(): return metadef_property_api.update( context, session, namespace_name, property_id, property_dict) def metadef_property_delete(context, namespace_name, property_name): """Delete a property or raise if it or namespace doesn't exist.""" session = get_session() with session.begin(): return metadef_property_api.delete( context, session, namespace_name, property_name) def metadef_property_delete_namespace_content(context, namespace_name): """Delete a property or raise if it or namespace doesn't exist.""" session = get_session() with session.begin(): return metadef_property_api.delete_by_namespace_name( context, session, namespace_name) def metadef_property_count(context, namespace_name): """Get count of properties for a namespace, raise if ns doesn't exist.""" session = get_session() with session.begin(): return metadef_property_api.count(context, session, namespace_name) def metadef_resource_type_create(context, values): """Create a resource_type""" session = get_session() with session.begin(): return metadef_resource_type_api.create( context, session, values) def metadef_resource_type_get(context, resource_type_name): """Get a resource_type""" session = get_session() with session.begin(): return metadef_resource_type_api.get( context, session, resource_type_name) def metadef_resource_type_get_all(context): """list all resource_types""" session = get_session() with session.begin(): return metadef_resource_type_api.get_all(context, session) def metadef_resource_type_delete(context, resource_type_name): """Get a resource_type""" session = get_session() with session.begin(): return metadef_resource_type_api.delete( context, session, resource_type_name) def metadef_resource_type_association_get( context, namespace_name, resource_type_name, ): session = get_session() with session.begin(): return metadef_association_api.get( context, session, namespace_name, resource_type_name) def metadef_resource_type_association_create( context, namespace_name, values, ): session = get_session() with session.begin(): return metadef_association_api.create( context, session, namespace_name, values) def metadef_resource_type_association_delete( context, namespace_name, resource_type_name, ): session = get_session() with session.begin(): return metadef_association_api.delete( context, session, namespace_name, resource_type_name) def metadef_resource_type_association_get_all_by_namespace( 
context, namespace_name, ): session = get_session() with session.begin(): return metadef_association_api.get_all_by_namespace( context, session, namespace_name) def metadef_tag_get_all( context, namespace_name, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc', ): """Get metadata-schema tags or raise if the namespace does not exist.""" session = get_session() with session.begin(): return metadef_tag_api.get_all( context, session, namespace_name, filters, marker, limit, sort_key, sort_dir) def metadef_tag_get(context, namespace_name, name): """Get a metadata-schema tag or raise if it does not exist.""" session = get_session() with session.begin(): return metadef_tag_api.get( context, session, namespace_name, name) @utils.no_4byte_params def metadef_tag_create(context, namespace_name, tag_dict): """Create a metadata-schema tag or raise if it already exists.""" session = get_session() with session.begin(): return metadef_tag_api.create( context, session, namespace_name, tag_dict) def metadef_tag_create_tags(context, namespace_name, tag_list, can_append=False): """Create a list of metadata-schema tags or raise if any already exists.""" session = get_session() with session.begin(): return metadef_tag_api.create_tags( context, session, namespace_name, tag_list, can_append) @utils.no_4byte_params def metadef_tag_update(context, namespace_name, id, tag_dict): """Update a tag or raise if it does not exist or is not visible.""" session = get_session() with session.begin(): return metadef_tag_api.update( context, session, namespace_name, id, tag_dict) def metadef_tag_delete(context, namespace_name, name): """Delete a tag or raise if namespace or tag doesn't exist.""" session = get_session() with session.begin(): return metadef_tag_api.delete( context, session, namespace_name, name) def metadef_tag_delete_namespace_content(context, namespace_name): """Delete all tags in a namespace or raise if the namespace doesn't exist.""" session = get_session() with session.begin(): return metadef_tag_api.delete_by_namespace_name( context, session, namespace_name) def metadef_tag_count(context, namespace_name): """Get count of tags for a namespace, raise if ns doesn't exist.""" session = get_session() with session.begin(): return metadef_tag_api.count(context, session, namespace_name) def _cached_image_format(cached_image): """Format a cached image for consumption outside of this module""" image_dict = { 'id': cached_image['id'], 'image_id': cached_image['image_id'], 'last_accessed': cached_image['last_accessed'].timestamp(), 'last_modified': cached_image['last_modified'].timestamp(), 'size': cached_image['size'], 'hits': cached_image['hits'], 'checksum': cached_image['checksum'] } return image_dict def node_reference_get_by_url(context, node_reference_url): """Get a node reference by node reference url""" session = get_session() with session.begin(): try: query = session.query(models.NodeReference) query = query.filter_by(node_reference_url=node_reference_url) return query.one() except sa_orm.exc.NoResultFound: msg = _("The node reference %s" " was not found." 
% node_reference_url) LOG.debug(msg) raise exception.NotFound(msg) @utils.no_4byte_params def node_reference_create(context, node_reference_url): """Create a node_reference or raise if it already exists.""" session = get_session() values = {'node_reference_url': node_reference_url} with session.begin(): node_reference = models.NodeReference() node_reference.update(values.copy()) try: node_reference.save(session=session) except db_exception.DBDuplicateEntry: raise exception.Duplicate() return node_reference def get_hit_count(context, image_id, node_reference_url): session = get_session() node_id = models.NodeReference.node_reference_id filters = [ models.CachedImages.image_id == image_id, models.NodeReference.node_reference_url == node_reference_url, ] with session.begin(): try: query = session.query( models.CachedImages.hits).join( models.NodeReference, node_id == models.CachedImages.node_reference_id, isouter=True).filter(sa_sql.and_(*filters)) return query.one()[0] except sa_orm.exc.NoResultFound: msg = _("Referenced %s is not cached on" " %s." % (image_id, node_reference_url)) LOG.debug(msg) # NOTE(abhishekk): Since image is not cached yet, assuming # hit count as 0 return 0 def get_cached_images(context, node_reference_url): node_id = models.NodeReference.node_reference_id session = get_session() with session.begin(): query = session.query( models.CachedImages).join( models.NodeReference, node_id == models.CachedImages.node_reference_id, isouter=True).filter( models.NodeReference.node_reference_url == node_reference_url) cached_images = [] for image in query.all(): cached_images.append(_cached_image_format(image)) return cached_images @utils.no_4byte_params def delete_all_cached_images(context, node_reference_url): session = get_session() with session.begin(): node_id = session.query(models.NodeReference.node_reference_id).filter( models.NodeReference.node_reference_url == node_reference_url ).scalar_subquery() query = session.query(models.CachedImages) query = query.filter_by(node_reference_id=node_id) query.delete(synchronize_session=False) def delete_cached_image(context, image_id, node_reference_url): session = get_session() with session.begin(): node_id = session.query(models.NodeReference.node_reference_id).filter( models.NodeReference.node_reference_url == node_reference_url ).scalar_subquery() query = session.query(models.CachedImages) query = query.filter( models.CachedImages.image_id == image_id) query = query.filter_by(node_reference_id=node_id) query.delete(synchronize_session=False) def get_least_recently_accessed(context, node_reference_url): node_id = models.NodeReference.node_reference_id session = get_session() with session.begin(): query = session.query( models.CachedImages.image_id).join( models.NodeReference, node_id == models.CachedImages.node_reference_id, isouter=True).filter( models.NodeReference.node_reference_url == node_reference_url) query = query.order_by(models.CachedImages.last_accessed) try: image_id = query.first()[0] except TypeError: # There are no more cached images return None return image_id def is_image_cached_for_node(context, node_reference_url, image_id): node_id = models.NodeReference.node_reference_id filters = [ models.CachedImages.image_id == image_id, models.NodeReference.node_reference_url == node_reference_url, ] session = get_session() with session.begin(): try: query = session.query( models.CachedImages.id).join( models.NodeReference, node_id == models.CachedImages.node_reference_id, isouter=True).filter(sa_sql.and_(*filters)) if 
query.one()[0]: return True except sa_orm.exc.NoResultFound: msg = _("Referenced %s is not cached on" " %s." % (image_id, node_reference_url)) LOG.debug(msg) return False @utils.no_4byte_params def insert_cache_details(context, node_reference_url, image_id, filesize, checksum=None, last_accessed=None, last_modified=None, hits=None): node_reference = node_reference_get_by_url(context, node_reference_url) session = get_session() accessed = last_accessed or timeutils.utcnow() modified = last_modified or timeutils.utcnow() values = { 'image_id': image_id, 'size': filesize, 'last_accessed': accessed, 'last_modified': modified, 'hits': hits or 0, 'checksum': checksum, 'node_reference_id': node_reference['node_reference_id'] } with session.begin(): cached_image = models.CachedImages() cached_image.update(values.copy()) try: cached_image.save(session=session) except db_exception.DBDuplicateEntry: msg = _("Cache entry for %s for %s" " already exists." % (image_id, node_reference_url)) LOG.debug(msg) @utils.no_4byte_params def update_hit_count(context, image_id, node_reference_url): session = get_session() last_accessed = timeutils.utcnow() with session.begin(): node_id = session.query(models.NodeReference.node_reference_id).filter( models.NodeReference.node_reference_url == node_reference_url ).scalar_subquery() query = session.query(models.CachedImages) query = query.filter( models.CachedImages.image_id == image_id) query = query.filter_by(node_reference_id=node_id) query.update({ 'hits': models.CachedImages.hits + 1, 'last_accessed': last_accessed }, synchronize_session='fetch') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadata.py0000664000175000017500000004630600000000000021201 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2013 OpenStack Foundation # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os from os.path import isfile from os.path import join import re from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import sqlalchemy from sqlalchemy import and_ from sqlalchemy.schema import MetaData from sqlalchemy.sql import select from glance.common import timeutils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) metadata_opts = [ cfg.StrOpt('metadata_source_path', default='/etc/glance/metadefs/', help=_(""" Absolute path to the directory where JSON metadefs files are stored. Glance Metadata Definitions ("metadefs") are served from the database, but are stored in files in the JSON format. The files in this directory are used to initialize the metadefs in the database. Additionally, when metadefs are exported from the database, the files are written to this directory. 
NOTE: If you plan to export metadefs, make sure that this directory has write permissions set for the user being used to run the glance-api service. Possible values: * String value representing a valid absolute pathname Related options: * None """)), ] CONF = cfg.CONF CONF.register_opts(metadata_opts) def get_metadef_namespaces_table(meta, conn): with conn.begin(): return sqlalchemy.Table('metadef_namespaces', meta, autoload_with=conn) def get_metadef_resource_types_table(meta, conn): with conn.begin(): return sqlalchemy.Table('metadef_resource_types', meta, autoload_with=conn) def get_metadef_namespace_resource_types_table(meta, conn): with conn.begin(): return sqlalchemy.Table('metadef_namespace_resource_types', meta, autoload_with=conn) def get_metadef_properties_table(meta, conn): with conn.begin(): return sqlalchemy.Table('metadef_properties', meta, autoload_with=conn) def get_metadef_objects_table(meta, conn): with conn.begin(): return sqlalchemy.Table('metadef_objects', meta, autoload_with=conn) def get_metadef_tags_table(meta, conn): with conn.begin(): return sqlalchemy.Table('metadef_tags', meta, autoload_with=conn) def _get_resource_type_id(meta, conn, name): rt_table = get_metadef_resource_types_table(meta, conn) with conn.begin(): resource_type = conn.execute( select(rt_table.c.id).where( rt_table.c.name == name ).select_from(rt_table) ).fetchone() if resource_type: return resource_type[0] return None def _get_resource_type(meta, conn, resource_type_id): rt_table = get_metadef_resource_types_table(meta, conn) with conn.begin(): return conn.execute( rt_table.select().where( rt_table.c.id == resource_type_id ) ).fetchone() def _get_namespace_resource_types(meta, conn, namespace_id): namespace_resource_types_table = ( get_metadef_namespace_resource_types_table(meta, conn)) with conn.begin(): return conn.execute( namespace_resource_types_table.select().where( namespace_resource_types_table.c.namespace_id == namespace_id ) ).fetchall() def _get_namespace_resource_type_by_ids(meta, conn, namespace_id, rt_id): namespace_resource_types_table = ( get_metadef_namespace_resource_types_table(meta, conn)) with conn.begin(): return conn.execute( namespace_resource_types_table.select().where(and_( namespace_resource_types_table.c.namespace_id == namespace_id, namespace_resource_types_table.c.resource_type_id == rt_id) ) ).fetchone() def _get_properties(meta, conn, namespace_id): properties_table = get_metadef_properties_table(meta, conn) with conn.begin(): return conn.execute( properties_table.select().where( properties_table.c.namespace_id == namespace_id ) ).fetchall() def _get_objects(meta, conn, namespace_id): objects_table = get_metadef_objects_table(meta, conn) with conn.begin(): return conn.execute( objects_table.select().where( objects_table.c.namespace_id == namespace_id) ).fetchall() def _get_tags(meta, conn, namespace_id): tags_table = get_metadef_tags_table(meta, conn) with conn.begin(): return conn.execute( tags_table.select().where( tags_table.c.namespace_id == namespace_id ) ).fetchall() def _get_resource_id(table, conn, namespace_id, resource_name): with conn.begin(): resource = conn.execute( select(table.c.id).where( and_( table.c.namespace_id == namespace_id, table.c.name == resource_name, ) ).select_from(table) ).fetchone() if resource: return resource[0] return None def _clear_metadata(meta, conn): metadef_tables = [get_metadef_properties_table(meta, conn), get_metadef_objects_table(meta, conn), get_metadef_tags_table(meta, conn), 
get_metadef_namespace_resource_types_table(meta, conn), get_metadef_namespaces_table(meta, conn), get_metadef_resource_types_table(meta, conn)] with conn.begin(): for table in metadef_tables: conn.execute(table.delete()) LOG.info(_LI("Table %s has been cleared"), table) def _clear_namespace_metadata(meta, conn, namespace_id): metadef_tables = [get_metadef_properties_table(meta, conn), get_metadef_objects_table(meta, conn), get_metadef_tags_table(meta, conn), get_metadef_namespace_resource_types_table(meta, conn)] namespaces_table = get_metadef_namespaces_table(meta, conn) with conn.begin(): for table in metadef_tables: conn.execute( table.delete().where(table.c.namespace_id == namespace_id)) conn.execute( namespaces_table.delete().where( namespaces_table.c.id == namespace_id)) def _populate_metadata(meta, conn, metadata_path=None, merge=False, prefer_new=False, overwrite=False): if not metadata_path: metadata_path = CONF.metadata_source_path try: if isfile(metadata_path): json_schema_files = [metadata_path] else: json_schema_files = [f for f in os.listdir(metadata_path) if isfile(join(metadata_path, f)) and f.endswith('.json')] except OSError as e: LOG.error(encodeutils.exception_to_unicode(e)) return if not json_schema_files: LOG.error(_LE("Json schema files not found in %s. Aborting."), metadata_path) return namespaces_table = get_metadef_namespaces_table(meta, conn) namespace_rt_table = get_metadef_namespace_resource_types_table(meta, conn) objects_table = get_metadef_objects_table(meta, conn) tags_table = get_metadef_tags_table(meta, conn) properties_table = get_metadef_properties_table(meta, conn) resource_types_table = get_metadef_resource_types_table(meta, conn) for json_schema_file in json_schema_files: try: file = join(metadata_path, json_schema_file) with open(file) as json_file: metadata = json.load(json_file) except Exception as e: LOG.error(_LE("Failed to parse json file %(file_path)s while " "populating metadata due to: %(error_msg)s"), {"file_path": file, "error_msg": encodeutils.exception_to_unicode(e)}) continue values = { 'namespace': metadata.get('namespace'), 'display_name': metadata.get('display_name'), 'description': metadata.get('description'), 'visibility': metadata.get('visibility'), 'protected': metadata.get('protected'), 'owner': metadata.get('owner', 'admin') } with conn.begin(): db_namespace = conn.execute( select( namespaces_table.c.id ).where( namespaces_table.c.namespace == values['namespace'] ).select_from( namespaces_table ) ).fetchone() if db_namespace and overwrite: LOG.info(_LI("Overwriting namespace %s"), values['namespace']) _clear_namespace_metadata(meta, conn, db_namespace[0]) db_namespace = None if not db_namespace: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(conn, namespaces_table, values) with conn.begin(): db_namespace = conn.execute( select( namespaces_table.c.id ).where( namespaces_table.c.namespace == values['namespace'] ).select_from( namespaces_table ) ).fetchone() elif not merge: LOG.info(_LI("Skipping namespace %s. 
It already exists in the " "database."), values['namespace']) continue elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(conn, namespaces_table, values, namespaces_table.c.id, db_namespace[0]) namespace_id = db_namespace[0] for resource_type in metadata.get('resource_type_associations', []): rt_id = _get_resource_type_id(meta, conn, resource_type['name']) if not rt_id: val = { 'name': resource_type['name'], 'created_at': timeutils.utcnow(), 'protected': True } _insert_data_to_db(conn, resource_types_table, val) rt_id = _get_resource_type_id( meta, conn, resource_type['name']) elif prefer_new: val = {'updated_at': timeutils.utcnow()} _update_data_in_db(conn, resource_types_table, val, resource_types_table.c.id, rt_id) values = { 'namespace_id': namespace_id, 'resource_type_id': rt_id, 'properties_target': resource_type.get( 'properties_target'), 'prefix': resource_type.get('prefix') } namespace_resource_type = _get_namespace_resource_type_by_ids( meta, conn, namespace_id, rt_id) if not namespace_resource_type: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(conn, namespace_rt_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_rt_association(conn, namespace_rt_table, values, rt_id, namespace_id) for name, schema in metadata.get('properties', {}).items(): values = { 'name': name, 'namespace_id': namespace_id, 'json_schema': json.dumps(schema) } property_id = _get_resource_id( properties_table, conn, namespace_id, name, ) if not property_id: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(conn, properties_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(conn, properties_table, values, properties_table.c.id, property_id) for object in metadata.get('objects', []): values = { 'name': object['name'], 'description': object.get('description'), 'namespace_id': namespace_id, 'json_schema': json.dumps( object.get('properties')) } object_id = _get_resource_id(objects_table, conn, namespace_id, object['name']) if not object_id: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(conn, objects_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(conn, objects_table, values, objects_table.c.id, object_id) for tag in metadata.get('tags', []): values = { 'name': tag.get('name'), 'namespace_id': namespace_id, } tag_id = _get_resource_id( tags_table, conn, namespace_id, tag['name']) if not tag_id: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(conn, tags_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(conn, tags_table, values, tags_table.c.id, tag_id) LOG.info(_LI("File %s loaded to database."), file) LOG.info(_LI("Metadata loading finished")) def _insert_data_to_db(conn, table, values, log_exception=True): try: with conn.begin(): conn.execute(table.insert().values(values)) except sqlalchemy.exc.IntegrityError: if log_exception: LOG.warning(_LW("Duplicate entry for values: %s"), values) def _update_data_in_db(conn, table, values, column, value): try: with conn.begin(): conn.execute( table.update().values(values).where(column == value) ) except sqlalchemy.exc.IntegrityError: LOG.warning(_LW("Duplicate entry for values: %s"), values) def _update_rt_association(conn, table, values, rt_id, namespace_id): try: with conn.begin(): conn.execute( table.update().values(values).where( and_( table.c.resource_type_id == rt_id, table.c.namespace_id == 
namespace_id, ) ) ) except sqlalchemy.exc.IntegrityError: LOG.warning(_LW("Duplicate entry for values: %s"), values) def _export_data_to_file(meta, conn, path): if not path: path = CONF.metadata_source_path namespace_table = get_metadef_namespaces_table(meta, conn) with conn.begin(): namespaces = conn.execute(namespace_table.select()).fetchall() pattern = re.compile(r'[\W_]+', re.UNICODE) for id, namespace in enumerate(namespaces, start=1): namespace_id = namespace['id'] namespace_file_name = pattern.sub('', namespace['display_name']) values = { 'namespace': namespace['namespace'], 'display_name': namespace['display_name'], 'description': namespace['description'], 'visibility': namespace['visibility'], 'protected': namespace['protected'], 'resource_type_associations': [], 'properties': {}, 'objects': [], 'tags': [] } namespace_resource_types = _get_namespace_resource_types( meta, conn, namespace_id) db_objects = _get_objects(meta, conn, namespace_id) db_properties = _get_properties(meta, conn, namespace_id) db_tags = _get_tags(meta, conn, namespace_id) resource_types = [] for namespace_resource_type in namespace_resource_types: resource_type = _get_resource_type( meta, conn, namespace_resource_type['resource_type_id']) resource_types.append({ 'name': resource_type['name'], 'prefix': namespace_resource_type['prefix'], 'properties_target': namespace_resource_type[ 'properties_target'] }) values.update({ 'resource_type_associations': resource_types }) objects = [] for object in db_objects: objects.append({ "name": object['name'], "description": object['description'], "properties": json.loads(object['json_schema']) }) values.update({ 'objects': objects }) properties = {} for property in db_properties: properties.update({ property['name']: json.loads(property['json_schema']) }) values.update({ 'properties': properties }) tags = [] for tag in db_tags: tags.append({ "name": tag['name'] }) values.update({ 'tags': tags }) try: file_name = ''.join([path, namespace_file_name, '.json']) if isfile(file_name): LOG.info(_LI("Overwriting: %s"), file_name) with open(file_name, 'w') as json_file: json_file.write(json.dumps(values)) except Exception as e: LOG.exception(encodeutils.exception_to_unicode(e)) LOG.info(_LI("Namespace %(namespace)s saved in %(file)s"), { 'namespace': namespace_file_name, 'file': file_name}) def db_load_metadefs(engine, metadata_path=None, merge=False, prefer_new=False, overwrite=False): meta = MetaData() if not merge and (prefer_new or overwrite): LOG.error(_LE("To use --prefer_new or --overwrite you need to combine " "one of these options with the --merge option.")) return if prefer_new and overwrite and merge: LOG.error(_LE("Please provide no more than one option from this list: " "--prefer_new, --overwrite")) return with engine.connect() as conn: _populate_metadata( meta, conn, metadata_path, merge, prefer_new, overwrite) def db_unload_metadefs(engine): meta = MetaData() with engine.connect() as conn: _clear_metadata(meta, conn) def db_export_metadefs(engine, metadata_path=None): meta = MetaData() with engine.connect() as conn: _export_data_to_file(meta, conn, metadata_path) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.874305 glance-29.0.0/glance/db/sqlalchemy/metadef_api/0000775000175000017500000000000000000000000021274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
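For orientation, here is a minimal, hedged sketch of how the three entry points above (db_load_metadefs, db_unload_metadefs, db_export_metadefs) are typically driven, e.g. by glance-manage; the engine URL and paths below are placeholders, not values taken from this tree:

from sqlalchemy import create_engine

from glance.db.sqlalchemy import metadata

# Placeholder connection URL; in a real deployment this comes from the
# [database]/connection option in glance-api.conf.
engine = create_engine('mysql+pymysql://glance:secret@controller/glance')

# Initial seeding: inserts every namespace found under metadata_source_path
# that is not already present in the database.
metadata.db_load_metadefs(engine, metadata_path='/etc/glance/metadefs/')

# Re-run after editing the JSON files: merge=True keeps existing rows, and
# prefer_new=True additionally updates rows that already exist instead of
# skipping them (mirrors 'glance-manage db load_metadefs --merge --prefer_new').
metadata.db_load_metadefs(engine, metadata_path='/etc/glance/metadefs/',
                          merge=True, prefer_new=True)

# Dump the current database contents back out, one JSON file per namespace.
# Note the trailing slash: _export_data_to_file() joins path and file name
# with ''.join(), not os.path.join().
metadata.db_export_metadefs(engine, metadata_path='/tmp/metadefs-export/')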
glance-29.0.0/glance/db/sqlalchemy/metadef_api/__init__.py0000664000175000017500000000000000000000000023373 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/namespace.py0000664000175000017500000002511600000000000023607 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_db.sqlalchemy.utils import paginate_query from oslo_log import log as logging import sqlalchemy.exc as sa_exc from sqlalchemy import or_ import sqlalchemy.orm as sa_orm from glance.common import exception as exc import glance.db.sqlalchemy.metadef_api as metadef_api from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _ LOG = logging.getLogger(__name__) def _is_namespace_visible(context, namespace, status=None): """Return True if the namespace is visible in this context.""" # Is admin == visible if context.is_admin: return True # No owner == visible if namespace['owner'] is None: return True # Is public == visible if 'visibility' in namespace: if namespace['visibility'] == 'public': return True # context.owner has a value and is the namespace owner == visible if context.owner is not None: if context.owner == namespace['owner']: return True # Private return False def _select_namespaces_query(context, session): """Build the query to get all namespaces based on the context""" LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s", {'is_admin': context.is_admin, 'owner': context.owner}) # If admin, return everything. query_ns = session.query(models.MetadefNamespace) if context.is_admin: return query_ns else: # If regular user, return only public namespaces. # However, if context.owner has a value, return both # public and private namespaces of the context.owner. if context.owner is not None: query = ( query_ns.filter( or_(models.MetadefNamespace.owner == context.owner, models.MetadefNamespace.visibility == 'public'))) else: query = query_ns.filter( models.MetadefNamespace.visibility == 'public') return query def _get(context, session, namespace_id): """Get a namespace by id, raise if not found""" try: query = session.query(models.MetadefNamespace).filter_by( id=namespace_id) namespace_rec = query.one() except sa_orm.exc.NoResultFound: msg = (_("Metadata definition namespace not found for id=%s") % namespace_id) LOG.warning(msg) raise exc.MetadefNamespaceNotFound(msg) # Make sure they are allowed to view it. 
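    # NOTE: _is_namespace_visible() (defined above) grants access when the
    # caller is an admin, when the namespace has no owner, when its
    # visibility is 'public', or when the caller is the owner; anything else
    # is treated as private and rejected below with MetadefForbidden.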
if not _is_namespace_visible(context, namespace_rec.to_dict()): LOG.debug("Forbidding request, metadata definition namespace=%s" " is not visible.", namespace_rec.namespace) emsg = _("Forbidding request, metadata definition namespace=%s" " is not visible.") % namespace_rec.namespace raise exc.MetadefForbidden(emsg) return namespace_rec def _get_by_name(context, session, name): """Get a namespace by name, raise if not found""" try: query = session.query(models.MetadefNamespace).filter_by( namespace=name) namespace_rec = query.one() except sa_orm.exc.NoResultFound: LOG.debug("Metadata definition namespace=%s was not found.", name) raise exc.MetadefNamespaceNotFound(namespace_name=name) # Make sure they are allowed to view it. if not _is_namespace_visible(context, namespace_rec.to_dict()): LOG.debug("Forbidding request, metadata definition namespace=%s" " is not visible.", name) emsg = _("Forbidding request, metadata definition namespace=%s" " is not visible.") % name raise exc.MetadefForbidden(emsg) return namespace_rec def _get_all(context, session, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'): """Get all namespaces that match zero or more filters. :param filters: dict of filter keys and values. :param marker: namespace id after which to start page :param limit: maximum number of namespaces to return :param sort_key: namespace attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) """ filters = filters or {} query = _select_namespaces_query(context, session) # if visibility filter, apply it to the context based query visibility = filters.pop('visibility', None) if visibility is not None: query = query.filter(models.MetadefNamespace.visibility == visibility) # if id_list filter, apply it to the context based query id_list = filters.pop('id_list', None) if id_list is not None: query = query.filter(models.MetadefNamespace.id.in_(id_list)) marker_namespace = None if marker is not None: marker_namespace = _get(context, session, marker) sort_keys = ['created_at', 'id'] sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys query = paginate_query(query=query, model=models.MetadefNamespace, limit=limit, sort_keys=sort_keys, marker=marker_namespace, sort_dir=sort_dir) return query.all() def _get_all_by_resource_types(context, session, filters, marker=None, limit=None, sort_key=None, sort_dir=None): """get all visible namespaces for the specified resource_types""" resource_types = filters['resource_types'] resource_type_list = resource_types.split(',') db_recs = ( session.query(models.MetadefResourceType) .join(models.MetadefResourceType.associations) .filter(models.MetadefResourceType.name.in_(resource_type_list)) .with_entities( models.MetadefResourceType.name, models.MetadefNamespaceResourceType.namespace_id, ) ) namespace_id_list = [] for name, namespace_id in db_recs: namespace_id_list.append(namespace_id) if len(namespace_id_list) == 0: return [] filters2 = filters filters2.update({'id_list': namespace_id_list}) return _get_all(context, session, filters2, marker, limit, sort_key, sort_dir) def get_all(context, session, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None): """List all visible namespaces""" namespaces = [] filters = filters or {} if 'resource_types' in filters: namespaces = _get_all_by_resource_types( context, session, filters, marker, limit, sort_key, sort_dir) else: namespaces = _get_all( context, session, filters, marker, limit, sort_key, sort_dir) 
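    # NOTE: both branches above return ORM model instances; they are
    # converted to plain dicts below so that callers outside this module
    # never hold live SQLAlchemy state (the same to_dict() pattern every
    # public function in this package follows).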
return [ns.to_dict() for ns in namespaces] def get(context, session, name): """Get a namespace by name, raise if not found""" namespace_rec = _get_by_name(context, session, name) return namespace_rec.to_dict() def create(context, session, values): """Create a namespace, raise if namespace already exists.""" namespace_name = values['namespace'] namespace = models.MetadefNamespace() metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) namespace.update(values.copy()) try: namespace.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Can not create the metadata definition namespace." " Namespace=%s already exists.", namespace_name) raise exc.MetadefDuplicateNamespace( namespace_name=namespace_name) return namespace.to_dict() def update(context, session, namespace_id, values): """Update a namespace, raise if not found/visible or duplicate result""" namespace_rec = _get(context, session, namespace_id) metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) try: namespace_rec.update(values.copy()) namespace_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata definition namespace with the same name of %s", values['namespace']) emsg = (_("Invalid update. It would result in a duplicate" " metadata definition namespace with the same name of %s") % values['namespace']) raise exc.MetadefDuplicateNamespace(emsg) return namespace_rec.to_dict() def delete(context, session, name): """Raise if not found, has references or not visible""" namespace_rec = _get_by_name(context, session, name) try: session.delete(namespace_rec) session.flush() except db_exc.DBError as e: if isinstance(e.inner_exception, sa_exc.IntegrityError): LOG.debug("Metadata definition namespace=%s not deleted. " "Other records still refer to it.", name) raise exc.MetadefIntegrityError( record_type='namespace', record_name=name) else: raise return namespace_rec.to_dict() def delete_cascade(context, session, name): """Raise if not found, has references or not visible""" namespace_rec = _get_by_name(context, session, name) try: metadef_api.tag.delete_namespace_content( context, session, namespace_rec.id) metadef_api.object.delete_namespace_content( context, session, namespace_rec.id) metadef_api.property.delete_namespace_content( context, session, namespace_rec.id) metadef_api.resource_type_association.delete_namespace_content( context, session, namespace_rec.id) session.delete(namespace_rec) session.flush() except db_exc.DBError as e: if isinstance(e.inner_exception, sa_exc.IntegrityError): LOG.debug("Metadata definition namespace=%s not deleted. " "Other records still refer to it.", name) raise exc.MetadefIntegrityError( record_type='namespace', record_name=name) else: raise return namespace_rec.to_dict() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/object.py0000664000175000017500000001314600000000000023121 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import func import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api import glance.db.sqlalchemy.metadef_api.utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _ LOG = logging.getLogger(__name__) def _get(context, session, object_id): try: query = session.query(models.MetadefObject).filter_by(id=object_id) metadef_object = query.one() except sa_orm.exc.NoResultFound: msg = (_("Metadata definition object not found for id=%s") % object_id) LOG.warning(msg) raise exc.MetadefObjectNotFound(msg) return metadef_object def _get_by_name(context, session, namespace_name, name): namespace = namespace_api.get(context, session, namespace_name) try: query = session.query(models.MetadefObject).filter_by( name=name, namespace_id=namespace['id']) metadef_object = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata definition object with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exc.MetadefObjectNotFound(object_name=name, namespace_name=namespace_name) return metadef_object def get_all(context, session, namespace_name): namespace = namespace_api.get(context, session, namespace_name) query = session.query(models.MetadefObject).filter_by( namespace_id=namespace['id']) md_objects = query.all() md_objects_list = [] for obj in md_objects: md_objects_list.append(obj.to_dict()) return md_objects_list def create(context, session, namespace_name, values): namespace = namespace_api.get(context, session, namespace_name) values.update({'namespace_id': namespace['id']}) md_object = models.MetadefObject() metadef_utils.drop_protected_attrs(models.MetadefObject, values) md_object.update(values.copy()) try: md_object.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("A metadata definition object with name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': md_object.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateObject( object_name=md_object.name, namespace_name=namespace_name) return md_object.to_dict() def get(context, session, namespace_name, name): md_object = _get_by_name(context, session, namespace_name, name) return md_object.to_dict() def update(context, session, namespace_name, object_id, values): """Update an object, raise if ns not found/visible or duplicate result""" namespace_api.get(context, session, namespace_name) md_object = _get(context, session, object_id) metadef_utils.drop_protected_attrs(models.MetadefObject, values) # values['updated_at'] = timeutils.utcnow() - done by TS mixin try: md_object.update(values.copy()) md_object.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata definition object with same name=%(name)s" " in namespace=%(namespace_name)s.", {'name': md_object.name, 'namespace_name': namespace_name}) emsg = (_("Invalid update. 
It would result in a duplicate" " metadata definition object with the same name=%(name)s" " in namespace=%(namespace_name)s.") % {'name': md_object.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateObject(emsg) return md_object.to_dict() def delete(context, session, namespace_name, object_name): namespace_api.get(context, session, namespace_name) md_object = _get_by_name(context, session, namespace_name, object_name) session.delete(md_object) session.flush() return md_object.to_dict() def delete_namespace_content(context, session, namespace_id): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = session.query(models.MetadefObject).filter_by( namespace_id=namespace_id) count = query.delete(synchronize_session='fetch') return count def delete_by_namespace_name(context, session, namespace_name): namespace = namespace_api.get(context, session, namespace_name) return delete_namespace_content(context, session, namespace['id']) def count(context, session, namespace_name): """Get the count of objects for a namespace, raise if ns not found""" namespace = namespace_api.get(context, session, namespace_name) query = session.query(func.count(models.MetadefObject.id)).filter_by( namespace_id=namespace['id']) return query.scalar() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/property.py0000664000175000017500000001370500000000000023540 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
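# NOTE: a hedged usage sketch of this module (not part of the original
# file). All public functions here are namespace-scoped: they first resolve
# the namespace via namespace_api.get(), which enforces visibility, and only
# then touch the property rows, e.g.:
#
#     prop = create(context, session, 'OS::Compute::Quota',
#                   {'name': 'cores', 'json_schema': {'type': 'integer'}})
#     prop = get(context, session, 'OS::Compute::Quota', 'cores')
#     n = count(context, session, 'OS::Compute::Quota')
#
# 'OS::Compute::Quota' is an illustrative namespace name; context and
# session come from the wrapper functions in glance/db/sqlalchemy/api.py.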
from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import func import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api from glance.db.sqlalchemy.metadef_api import utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _ LOG = logging.getLogger(__name__) def _get(context, session, property_id): try: query = session.query(models.MetadefProperty).filter_by(id=property_id) property_rec = query.one() except sa_orm.exc.NoResultFound: msg = (_("Metadata definition property not found for id=%s") % property_id) LOG.warning(msg) raise exc.MetadefPropertyNotFound(msg) return property_rec def _get_by_name(context, session, namespace_name, name): """get a property; raise if ns not found/visible or property not found""" namespace = namespace_api.get(context, session, namespace_name) try: query = session.query(models.MetadefProperty).filter_by( name=name, namespace_id=namespace['id']) property_rec = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata definition property with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exc.MetadefPropertyNotFound(property_name=name, namespace_name=namespace_name) return property_rec def get(context, session, namespace_name, name): """get a property; raise if ns not found/visible or property not found""" property_rec = _get_by_name(context, session, namespace_name, name) return property_rec.to_dict() def get_all(context, session, namespace_name): namespace = namespace_api.get(context, session, namespace_name) query = session.query(models.MetadefProperty).filter_by( namespace_id=namespace['id']) properties = query.all() properties_list = [] for prop in properties: properties_list.append(prop.to_dict()) return properties_list def create(context, session, namespace_name, values): namespace = namespace_api.get(context, session, namespace_name) values.update({'namespace_id': namespace['id']}) property_rec = models.MetadefProperty() metadef_utils.drop_protected_attrs(models.MetadefProperty, values) property_rec.update(values.copy()) try: property_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Can not create metadata definition property. A property" " with name=%(name)s already exists in" " namespace=%(namespace_name)s.", {'name': property_rec.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateProperty( property_name=property_rec.name, namespace_name=namespace_name) return property_rec.to_dict() def update(context, session, namespace_name, property_id, values): """Update a property, raise if ns not found/visible or duplicate result""" namespace_api.get(context, session, namespace_name) property_rec = _get(context, session, property_id) metadef_utils.drop_protected_attrs(models.MetadefProperty, values) # values['updated_at'] = timeutils.utcnow() - done by TS mixin try: property_rec.update(values.copy()) property_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata definition property with the same name=%(name)s" " in namespace=%(namespace_name)s.", {'name': property_rec.name, 'namespace_name': namespace_name}) emsg = (_("Invalid update. 
It would result in a duplicate" " metadata definition property with the same name=%(name)s" " in namespace=%(namespace_name)s.") % {'name': property_rec.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateProperty(emsg) return property_rec.to_dict() def delete(context, session, namespace_name, property_name): property_rec = _get_by_name( context, session, namespace_name, property_name) if property_rec: session.delete(property_rec) session.flush() return property_rec.to_dict() def delete_namespace_content(context, session, namespace_id): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = session.query(models.MetadefProperty).filter_by( namespace_id=namespace_id) count = query.delete(synchronize_session='fetch') return count def delete_by_namespace_name(context, session, namespace_name): namespace = namespace_api.get(context, session, namespace_name) return delete_namespace_content(context, session, namespace['id']) def count(context, session, namespace_name): """Get the count of properties for a namespace, raise if ns not found""" namespace = namespace_api.get(context, session, namespace_name) query = session.query(func.count(models.MetadefProperty.id)).filter_by( namespace_id=namespace['id']) return query.scalar() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/resource_type.py0000664000175000017500000000706300000000000024544 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging import sqlalchemy.exc as sa_exc import sqlalchemy.orm as sa_orm from glance.common import exception as exc import glance.db.sqlalchemy.metadef_api.utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models LOG = logging.getLogger(__name__) def get(context, session, name): """Get a resource type, raise if not found""" try: query = session.query(models.MetadefResourceType).filter_by(name=name) resource_type = query.one() except sa_orm.exc.NoResultFound: LOG.debug("No metadata definition resource-type found with name %s", name) raise exc.MetadefResourceTypeNotFound(resource_type_name=name) return resource_type.to_dict() def get_all(context, session): """Get a list of all resource types""" query = session.query(models.MetadefResourceType) resource_types = query.all() resource_types_list = [] for rt in resource_types: resource_types_list.append(rt.to_dict()) return resource_types_list def create(context, session, values): """Create a resource_type, raise if it already exists.""" resource_type = models.MetadefResourceType() metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) resource_type.update(values.copy()) try: resource_type.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Can not create the metadata definition resource-type. 
" "A resource-type with name=%s already exists.", resource_type.name) raise exc.MetadefDuplicateResourceType( resource_type_name=resource_type.name) return resource_type.to_dict() def update(context, session, values): """Update a resource type, raise if not found""" name = values['name'] metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) db_rec = get(context, session, name) db_rec.update(values.copy()) db_rec.save(session=session) return db_rec.to_dict() def delete(context, session, name): """Delete a resource type or raise if not found or is protected""" db_rec = get(context, session, name) if db_rec.protected is True: LOG.debug("Delete forbidden. Metadata definition resource-type %s is a" " seeded-system type and can not be deleted.", name) raise exc.ProtectedMetadefResourceTypeSystemDelete( resource_type_name=name) try: session.delete(db_rec) session.flush() except db_exc.DBError as e: if isinstance(e.inner_exception, sa_exc.IntegrityError): LOG.debug("Could not delete Metadata definition resource-type %s" ". It still has content", name) raise exc.MetadefIntegrityError( record_type='resource-type', record_name=name) else: raise return db_rec.to_dict() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/resource_type_association.py0000664000175000017500000002011700000000000027133 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db import exception as db_exc from oslo_log import log as logging import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api from glance.db.sqlalchemy.metadef_api import resource_type as resource_type_api from glance.db.sqlalchemy.metadef_api import utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models LOG = logging.getLogger(__name__) def _to_db_dict(namespace_id, resource_type_id, model_dict): """transform a model dict to a metadef_namespace_resource_type dict""" db_dict = {'namespace_id': namespace_id, 'resource_type_id': resource_type_id, 'properties_target': model_dict['properties_target'], 'prefix': model_dict['prefix']} return db_dict def _to_model_dict(resource_type_name, ns_res_type_dict): """transform a metadef_namespace_resource_type dict to a model dict""" model_dict = {'name': resource_type_name, 'properties_target': ns_res_type_dict['properties_target'], 'prefix': ns_res_type_dict['prefix'], 'created_at': ns_res_type_dict['created_at'], 'updated_at': ns_res_type_dict['updated_at']} return model_dict def _set_model_dict(resource_type_name, properties_target, prefix, created_at, updated_at): """return a model dict set with the passed in key values""" model_dict = {'name': resource_type_name, 'properties_target': properties_target, 'prefix': prefix, 'created_at': created_at, 'updated_at': updated_at} return model_dict def _get(context, session, namespace_name, resource_type_name, namespace_id, resource_type_id): """Get a namespace resource_type association""" # visibility check assumed done in calling routine via namespace_get try: query = session.query(models.MetadefNamespaceResourceType).filter_by( namespace_id=namespace_id, resource_type_id=resource_type_id) db_rec = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata definition resource-type association of" " resource_type=%(resource_type_name)s to" " namespace_name=%(namespace_name)s was not found.", {'resource_type_name': resource_type_name, 'namespace_name': namespace_name}) raise exc.MetadefResourceTypeAssociationNotFound( resource_type_name=resource_type_name, namespace_name=namespace_name) return db_rec def _create_association( context, session, namespace_name, resource_type_name, values, ): """Create an association, raise if it already exists.""" namespace_resource_type_rec = models.MetadefNamespaceResourceType() metadef_utils.drop_protected_attrs( models.MetadefNamespaceResourceType, values) # values['updated_at'] = timeutils.utcnow() # TS mixin should do this namespace_resource_type_rec.update(values.copy()) try: namespace_resource_type_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("The metadata definition resource-type association of" " resource_type=%(resource_type_name)s to" " namespace=%(namespace_name)s, already exists.", {'resource_type_name': resource_type_name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateResourceTypeAssociation( resource_type_name=resource_type_name, namespace_name=namespace_name) return namespace_resource_type_rec.to_dict() def _delete(context, session, namespace_name, resource_type_name, namespace_id, resource_type_id): """Delete a resource type association or raise if not found.""" db_rec = _get( context, session, namespace_name, resource_type_name, namespace_id, resource_type_id) session.delete(db_rec) session.flush() return db_rec.to_dict() def get(context, session, namespace_name, resource_type_name): """Get 
a resource_type associations; raise if not found""" namespace = namespace_api.get( context, session, namespace_name) resource_type = resource_type_api.get( context, session, resource_type_name) found = _get( context, session, namespace_name, resource_type_name, namespace['id'], resource_type['id']) return _to_model_dict(resource_type_name, found) def get_all_by_namespace(context, session, namespace_name): """List resource_type associations by namespace, raise if not found""" # namespace get raises an exception if not visible namespace = namespace_api.get( context, session, namespace_name) db_recs = ( session.query(models.MetadefResourceType) .join(models.MetadefResourceType.associations) .filter_by(namespace_id=namespace['id']) .with_entities( models.MetadefResourceType.name, models.MetadefNamespaceResourceType.properties_target, models.MetadefNamespaceResourceType.prefix, models.MetadefNamespaceResourceType.created_at, models.MetadefNamespaceResourceType.updated_at, ) ) model_dict_list = [] for name, properties_target, prefix, created_at, updated_at in db_recs: model_dict_list.append( _set_model_dict (name, properties_target, prefix, created_at, updated_at) ) return model_dict_list def create(context, session, namespace_name, values): """Create an association, raise if already exists or ns not found.""" namespace = namespace_api.get( context, session, namespace_name) # if the resource_type does not exist, create it resource_type_name = values['name'] metadef_utils.drop_protected_attrs( models.MetadefNamespaceResourceType, values) try: resource_type = resource_type_api.get( context, session, resource_type_name) except exc.NotFound: resource_type = None LOG.debug("Creating resource-type %s", resource_type_name) if resource_type is None: resource_type_dict = {'name': resource_type_name, 'protected': False} resource_type = resource_type_api.create( context, session, resource_type_dict) # Create the association record, set the field values ns_resource_type_dict = _to_db_dict( namespace['id'], resource_type['id'], values) new_rec = _create_association( context, session, namespace_name, resource_type_name, ns_resource_type_dict) return _to_model_dict(resource_type_name, new_rec) def delete(context, session, namespace_name, resource_type_name): """Delete an association or raise if not found""" namespace = namespace_api.get( context, session, namespace_name) resource_type = resource_type_api.get( context, session, resource_type_name) deleted = _delete( context, session, namespace_name, resource_type_name, namespace['id'], resource_type['id']) return _to_model_dict(resource_type_name, deleted) def delete_namespace_content(context, session, namespace_id): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = session.query(models.MetadefNamespaceResourceType).filter_by( namespace_id=namespace_id) count = query.delete(synchronize_session='fetch') return count ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/tag.py0000664000175000017500000001704400000000000022427 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_db.sqlalchemy.utils import paginate_query from oslo_log import log as logging from sqlalchemy import func import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api import glance.db.sqlalchemy.metadef_api.utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _LW LOG = logging.getLogger(__name__) def _get(context, session, id): try: query = (session.query(models.MetadefTag).filter_by(id=id)) metadef_tag = query.one() except sa_orm.exc.NoResultFound: msg = (_LW("Metadata tag not found for id %s") % id) LOG.warning(msg) raise exc.MetadefTagNotFound(message=msg) return metadef_tag def _get_by_name(context, session, namespace_name, name): namespace = namespace_api.get(context, session, namespace_name) try: query = (session.query(models.MetadefTag).filter_by( name=name, namespace_id=namespace['id'])) metadef_tag = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata tag with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exc.MetadefTagNotFound(name=name, namespace_name=namespace_name) return metadef_tag def get_all(context, session, namespace_name, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'): """Get all tags that match zero or more filters. :param filters: dict of filter keys and values. :param marker: tag id after which to start page :param limit: maximum number of tags to return :param sort_key: tag attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) """ namespace = namespace_api.get(context, session, namespace_name) query = (session.query(models.MetadefTag).filter_by( namespace_id=namespace['id'])) marker_tag = None if marker is not None: marker_tag = _get(context, session, marker) sort_keys = ['created_at', 'id'] if sort_key not in sort_keys: sort_keys.insert(0, sort_key) query = paginate_query(query=query, model=models.MetadefTag, limit=limit, sort_keys=sort_keys, marker=marker_tag, sort_dir=sort_dir) metadef_tags = query.all() metadef_tag_list = [] for tag in metadef_tags: metadef_tag_list.append(tag.to_dict()) return metadef_tag_list def create(context, session, namespace_name, values): namespace = namespace_api.get(context, session, namespace_name) values.update({'namespace_id': namespace['id']}) metadef_tag = models.MetadefTag() metadef_utils.drop_protected_attrs(models.MetadefTag, values) metadef_tag.update(values.copy()) try: metadef_tag.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("A metadata tag name=%(name)s" " already exists in namespace=%(namespace_name)s."
" (Please note that metadata tag names are" " case insensitive).", {'name': metadef_tag.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateTag( name=metadef_tag.name, namespace_name=namespace_name) return metadef_tag.to_dict() def create_tags(context, session, namespace_name, tag_list, can_append): metadef_tags_list = [] if tag_list: namespace = namespace_api.get(context, session, namespace_name) try: if not can_append: query = (session.query(models.MetadefTag).filter_by( namespace_id=namespace['id'])) query.delete(synchronize_session='fetch') for value in tag_list: value.update({'namespace_id': namespace['id']}) metadef_utils.drop_protected_attrs( models.MetadefTag, value) metadef_tag = models.MetadefTag() metadef_tag.update(value.copy()) metadef_tag.save(session=session) metadef_tags_list.append(metadef_tag.to_dict()) except db_exc.DBDuplicateEntry: LOG.debug("A metadata tag name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': metadef_tag.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateTag( name=metadef_tag.name, namespace_name=namespace_name) return metadef_tags_list def get(context, session, namespace_name, name): metadef_tag = _get_by_name(context, session, namespace_name, name) return metadef_tag.to_dict() def update(context, session, namespace_name, id, values): """Update an tag, raise if ns not found/visible or duplicate result""" namespace_api.get(context, session, namespace_name) metadata_tag = _get(context, session, id) metadef_utils.drop_protected_attrs(models.MetadefTag, values) # values['updated_at'] = timeutils.utcnow() - done by TS mixin try: metadata_tag.update(values.copy()) metadata_tag.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata tag with same name=%(name)s" " in namespace=%(namespace_name)s.", {'name': values['name'], 'namespace_name': namespace_name}) raise exc.MetadefDuplicateTag( name=values['name'], namespace_name=namespace_name) return metadata_tag.to_dict() def delete(context, session, namespace_name, name): namespace_api.get(context, session, namespace_name) md_tag = _get_by_name(context, session, namespace_name, name) session.delete(md_tag) session.flush() return md_tag.to_dict() def delete_namespace_content(context, session, namespace_id): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = (session.query(models.MetadefTag).filter_by( namespace_id=namespace_id)) count = query.delete(synchronize_session='fetch') return count def delete_by_namespace_name(context, session, namespace_name): namespace = namespace_api.get(context, session, namespace_name) return delete_namespace_content(context, session, namespace['id']) def count(context, session, namespace_name): """Get the count of objects for a namespace, raise if ns not found""" namespace = namespace_api.get(context, session, namespace_name) query = (session.query(func.count(models.MetadefTag.id)).filter_by( namespace_id=namespace['id'])) return query.scalar() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/metadef_api/utils.py0000664000175000017500000000163100000000000023007 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def drop_protected_attrs(model_class, values): """ Remove protected attributes from the values dictionary using the model's __protected_attributes__ field. """ for attr in model_class.__protected_attributes__: if attr in values: del values[attr] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/models.py0000664000175000017500000002736000000000000020703 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for glance data """ import uuid from oslo_db.sqlalchemy import models from oslo_serialization import jsonutils from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Enum from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy.orm import declarative_base from sqlalchemy.orm import backref, relationship from sqlalchemy import sql from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.types import TypeDecorator from sqlalchemy import UniqueConstraint from glance.common import timeutils BASE = declarative_base() class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string""" impl = Text def process_bind_param(self, value, dialect): if value is not None: value = jsonutils.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class GlanceBase(models.ModelBase, models.TimestampMixin): """Base class for Glance Models.""" __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} __table_initialized__ = False __protected_attributes__ = set([ "created_at", "updated_at", "deleted_at", "deleted"]) def save(self, session=None): from glance.db.sqlalchemy import api as db_api super(GlanceBase, self).save(session or db_api.get_session()) created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) # TODO(vsergeyev): Column `updated_at` has no default value in # OpenStack common code. We should decide whether this value is # required and make changes in oslo (if required) or # in glance (if not).
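# An illustrative sketch of how the timestamp columns in this base class # behave (the `image` and `session` names here are hypothetical, not part # of this module): # # image = Image(status='queued', visibility='shared') # session.add(image) # session.flush() # # image.created_at is now filled in by the column default; # # updated_at is refreshed by `onupdate` on every later UPDATE.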
updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=True, onupdate=lambda: timeutils.utcnow()) # TODO(boris-42): Use SoftDeleteMixin instead of deleted Column after # migration that provides UniqueConstraints and change # type of this column. deleted_at = Column(DateTime) deleted = Column(Boolean, nullable=False, default=False) def delete(self, session=None): """Delete this object.""" self.deleted = True self.deleted_at = timeutils.utcnow() self.save(session=session) def keys(self): return self.__dict__.keys() def values(self): return self.__dict__.values() def items(self): return self.__dict__.items() def to_dict(self): d = self.__dict__.copy() # NOTE(flaper87): Remove # private state instance # It is not serializable # and causes CircularReference d.pop("_sa_instance_state") return d class Image(BASE, GlanceBase): """Represents an image in the datastore.""" __tablename__ = 'images' __table_args__ = (Index('checksum_image_idx', 'checksum'), Index('visibility_image_idx', 'visibility'), Index('ix_images_deleted', 'deleted'), Index('owner_image_idx', 'owner'), Index('created_at_image_idx', 'created_at'), Index('updated_at_image_idx', 'updated_at'), Index('os_hidden_image_idx', 'os_hidden'), Index('os_hash_value_image_idx', 'os_hash_value')) id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) name = Column(String(255)) disk_format = Column(String(20)) container_format = Column(String(20)) size = Column(BigInteger().with_variant(Integer, "sqlite")) virtual_size = Column(BigInteger().with_variant(Integer, "sqlite")) status = Column(String(30), nullable=False) visibility = Column(Enum('private', 'public', 'shared', 'community', name='image_visibility'), nullable=False, server_default='shared') checksum = Column(String(32)) os_hash_algo = Column(String(64)) os_hash_value = Column(String(128)) min_disk = Column(Integer, nullable=False, default=0) min_ram = Column(Integer, nullable=False, default=0) owner = Column(String(255)) protected = Column(Boolean, nullable=False, default=False, server_default=sql.expression.false()) os_hidden = Column(Boolean, nullable=False, default=False, server_default=sql.expression.false()) class ImageProperty(BASE, GlanceBase): """Represents an image property in the datastore.""" __tablename__ = 'image_properties' __table_args__ = (Index('ix_image_properties_image_id', 'image_id'), Index('ix_image_properties_deleted', 'deleted'), UniqueConstraint('image_id', 'name', name='ix_image_properties_' 'image_id_name'),) id = Column(Integer, primary_key=True) image_id = Column(String(36), ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('properties')) name = Column(String(255), nullable=False) value = Column(Text) class ImageTag(BASE, GlanceBase): """Represents an image tag in the datastore.""" __tablename__ = 'image_tags' __table_args__ = (Index('ix_image_tags_image_id', 'image_id'), Index('ix_image_tags_image_id_tag_value', 'image_id', 'value'),) id = Column(Integer, primary_key=True, nullable=False) image_id = Column(String(36), ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('tags')) value = Column(String(255), nullable=False) class ImageLocation(BASE, GlanceBase): """Represents an image location in the datastore.""" __tablename__ = 'image_locations' __table_args__ = (Index('ix_image_locations_image_id', 'image_id'), Index('ix_image_locations_deleted', 'deleted'),) id = Column(Integer, primary_key=True, nullable=False) image_id = Column(String(36), 
ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('locations')) value = Column(Text(), nullable=False) meta_data = Column(JSONEncodedDict(), default={}) status = Column(String(30), server_default='active', nullable=False) class ImageMember(BASE, GlanceBase): """Represents an image member in the datastore.""" __tablename__ = 'image_members' unique_constraint_key_name = 'image_members_image_id_member_deleted_at_key' __table_args__ = (Index('ix_image_members_deleted', 'deleted'), Index('ix_image_members_image_id', 'image_id'), Index('ix_image_members_image_id_member', 'image_id', 'member'), UniqueConstraint('image_id', 'member', 'deleted_at', name=unique_constraint_key_name),) id = Column(Integer, primary_key=True) image_id = Column(String(36), ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('members')) member = Column(String(255), nullable=False) can_share = Column(Boolean, nullable=False, default=False) status = Column(String(20), nullable=False, default="pending", server_default='pending') class Task(BASE, GlanceBase): """Represents a task in the datastore""" __tablename__ = 'tasks' __table_args__ = (Index('ix_tasks_type', 'type'), Index('ix_tasks_status', 'status'), Index('ix_tasks_owner', 'owner'), Index('ix_tasks_deleted', 'deleted'), Index('ix_tasks_image_id', 'image_id'), Index('ix_tasks_updated_at', 'updated_at')) id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) type = Column(String(30), nullable=False) status = Column(String(30), nullable=False) owner = Column(String(255), nullable=False) expires_at = Column(DateTime, nullable=True) image_id = Column(String(36), nullable=True) request_id = Column(String(64), nullable=True) user_id = Column(String(64), nullable=True) class TaskInfo(BASE, models.ModelBase): """Represents task info in the datastore""" __tablename__ = 'task_info' task_id = Column(String(36), ForeignKey('tasks.id'), primary_key=True, nullable=False) task = relationship(Task, backref=backref('info', uselist=False)) # NOTE(nikhil): input and result are stored as text in the DB. # SQLAlchemy marshals the data to/from JSON using custom type # JSONEncodedDict. It uses simplejson underneath.
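# For example (hypothetical values, not from the source): a dict assigned # to the `input` column below is dumped to its JSON text on write and # loaded back into a dict on read by JSONEncodedDict: # # info = TaskInfo(task_id=task.id, input={'version': 1}) # # persisted as the string '{"version": 1}', read back as a dict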
input = Column(JSONEncodedDict()) result = Column(JSONEncodedDict()) message = Column(Text) class NodeReference(BASE, models.ModelBase): """Represents node info in the datastore""" __tablename__ = 'node_reference' __table_args__ = (UniqueConstraint( 'node_reference_url', name='uq_node_reference_node_reference_url'),) node_reference_id = Column(BigInteger().with_variant(Integer, 'sqlite'), primary_key=True, nullable=False, autoincrement=True) node_reference_url = Column(String(length=255), nullable=False) class CachedImages(BASE, models.ModelBase): """Represents a cached image in the datastore.""" __tablename__ = 'cached_images' __table_args__ = (UniqueConstraint( 'image_id', 'node_reference_id', name='ix_cached_images_image_id_node_reference_id'),) id = Column(BigInteger().with_variant(Integer, 'sqlite'), primary_key=True, autoincrement=True, nullable=False) image_id = Column(String(36), nullable=False) last_accessed = Column(DateTime, nullable=False) last_modified = Column(DateTime, nullable=False) size = Column(BigInteger(), nullable=False) hits = Column(Integer, nullable=False) checksum = Column(String(32), nullable=True) node_reference_id = Column( BigInteger().with_variant(Integer, 'sqlite'), ForeignKey('node_reference.node_reference_id'), nullable=False) def register_models(engine): """Create database tables for all models with the given engine.""" models = (Image, ImageProperty, ImageMember) for model in models: model.metadata.create_all(engine) def unregister_models(engine): """Drop database tables for all models with the given engine.""" models = (Image, ImageProperty) for model in models: model.metadata.drop_all(engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/models_metadef.py0000664000175000017500000001574100000000000022370 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
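# A short illustrative sketch of the dict-style access the declarative base # defined below provides (hypothetical values, not part of the source): # # ns = MetadefNamespace(namespace='OS::Compute', owner='admin') # ns['owner'] # -> 'admin', via oslo.db's ModelBase mapping API # ns.to_dict() # -> plain dict of the mapped column values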
""" SQLAlchemy models for glance metadata schema """ from oslo_db.sqlalchemy import models from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy.orm import declarative_base from sqlalchemy.orm import relationship from sqlalchemy import String from sqlalchemy import Text from sqlalchemy import UniqueConstraint from glance.common import timeutils from glance.db.sqlalchemy.models import JSONEncodedDict class DictionaryBase(models.ModelBase): metadata = None def to_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d BASE_DICT = declarative_base(cls=DictionaryBase) class GlanceMetadefBase(models.TimestampMixin): """Base class for Glance Metadef Models.""" __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} __table_initialized__ = False __protected_attributes__ = set(["created_at", "updated_at"]) created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) # TODO(wko): Column `updated_at` have no default value in # OpenStack common code. We should decide, is this value # required and make changes in oslo (if required) or # in glance (if not). updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=True, onupdate=lambda: timeutils.utcnow()) class MetadefNamespace(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema namespace in the datastore.""" __tablename__ = 'metadef_namespaces' __table_args__ = (UniqueConstraint('namespace', name='uq_metadef_namespaces' '_namespace'), Index('ix_metadef_namespaces_owner', 'owner') ) id = Column(Integer, primary_key=True, nullable=False) namespace = Column(String(80), nullable=False) display_name = Column(String(80)) description = Column(Text()) visibility = Column(String(32)) protected = Column(Boolean) owner = Column(String(255), nullable=False) class MetadefObject(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema object in the datastore.""" __tablename__ = 'metadef_objects' __table_args__ = (UniqueConstraint('namespace_id', 'name', name='uq_metadef_objects_namespace_id' '_name'), Index('ix_metadef_objects_name', 'name') ) id = Column(Integer, primary_key=True, nullable=False) namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), nullable=False) name = Column(String(80), nullable=False) description = Column(Text()) required = Column(Text()) json_schema = Column(JSONEncodedDict(), default={}, nullable=False) class MetadefProperty(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema namespace-property in the datastore.""" __tablename__ = 'metadef_properties' __table_args__ = (UniqueConstraint('namespace_id', 'name', name='uq_metadef_properties_namespace' '_id_name'), Index('ix_metadef_properties_name', 'name') ) id = Column(Integer, primary_key=True, nullable=False) namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), nullable=False) name = Column(String(80), nullable=False) json_schema = Column(JSONEncodedDict(), default={}, nullable=False) class MetadefNamespaceResourceType(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema namespace-property in the datastore.""" __tablename__ = 'metadef_namespace_resource_types' __table_args__ = (Index('ix_metadef_ns_res_types_namespace_id', 'namespace_id'), ) resource_type_id = Column(Integer, ForeignKey('metadef_resource_types.id'), primary_key=True, nullable=False) namespace_id = Column(Integer, 
ForeignKey('metadef_namespaces.id'), primary_key=True, nullable=False) properties_target = Column(String(80)) prefix = Column(String(80)) class MetadefResourceType(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema resource type in the datastore.""" __tablename__ = 'metadef_resource_types' __table_args__ = (UniqueConstraint('name', name='uq_metadef_resource_types_name'), ) id = Column(Integer, primary_key=True, nullable=False) name = Column(String(80), nullable=False) protected = Column(Boolean, nullable=False, default=False) associations = relationship( "MetadefNamespaceResourceType", primaryjoin=id == MetadefNamespaceResourceType.resource_type_id) class MetadefTag(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema tag in the data store.""" __tablename__ = 'metadef_tags' __table_args__ = (UniqueConstraint('namespace_id', 'name', name='uq_metadef_tags_namespace_id' '_name'), Index('ix_metadef_tags_name', 'name') ) id = Column(Integer, primary_key=True, nullable=False) namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), nullable=False) name = Column(String(80), nullable=False) def register_models(engine): """Create database tables for all models with the given engine.""" models = (MetadefNamespace, MetadefObject, MetadefProperty, MetadefTag, MetadefResourceType, MetadefNamespaceResourceType) for model in models: model.metadata.create_all(engine) def unregister_models(engine): """Drop database tables for all models with the given engine.""" models = (MetadefObject, MetadefProperty, MetadefNamespaceResourceType, MetadefTag, MetadefNamespace, MetadefResourceType) for model in models: model.metadata.drop_all(engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/sqlalchemy/schema.py0000664000175000017500000000244000000000000020650 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Various conveniences used for migration scripts """ from oslo_log import log as logging import sqlalchemy.types LOG = logging.getLogger(__name__) def String(length): return sqlalchemy.types.String(length=length) def Text(): return sqlalchemy.types.Text(length=None) def Boolean(): return sqlalchemy.types.Boolean(create_constraint=True, name=None) def DateTime(): return sqlalchemy.types.DateTime(timezone=False) def Integer(): return sqlalchemy.types.Integer() def BigInteger(): return sqlalchemy.types.BigInteger() def PickleType(): return sqlalchemy.types.PickleType() def Numeric(): return sqlalchemy.types.Numeric() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/db/utils.py0000664000175000017500000000473200000000000016414 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common import exception from glance.i18n import _ def mutate_image_dict_to_v1(image): """ Replaces a v2-style image dictionary's 'visibility' member with the equivalent v1-style 'is_public' member. """ visibility = image.pop('visibility') is_image_public = 'public' == visibility image['is_public'] = is_image_public return image def ensure_image_dict_v2_compliant(image): """ Accepts an image dictionary that contains a v1-style 'is_public' member and returns the equivalent v2-style image dictionary. """ if ('is_public' in image): if ('visibility' in image): msg = _("Specifying both 'visibility' and 'is_public' is not " "permitted.") raise exception.Invalid(msg) else: image['visibility'] = ('public' if image.pop('is_public') else 'shared') return image def is_image_visible(context, image, image_member_find, status=None): """Return True if the image is visible in this context.""" # Is admin == image visible if context.is_admin: return True # No owner == image visible if image['owner'] is None: return True # Public or Community visibility == image visible if image['visibility'] in ['public', 'community']: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == image['owner']: return True # Figure out if this image is shared with that tenant if 'shared' == image['visibility']: members = image_member_find(context, image_id=image['id'], member=context.owner, status=status) if members: return True # Private image return False ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.874305 glance-29.0.0/glance/domain/0000775000175000017500000000000000000000000015556 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/domain/__init__.py0000664000175000017500000005655400000000000017676 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
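# A minimal sketch of the image domain model defined in this module # (illustrative values; assumes oslo.config defaults are registered): # # factory = ImageFactory() # image = factory.new_image(name='cirros', disk_format='qcow2', # container_format='bare') # image.status # -> 'queued' # image.status = 'saving' # permitted by valid_state_targets below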
from collections import abc import datetime import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from glance.common import exception from glance.common import timeutils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('task_executor', 'glance.common.config', group='task') _delayed_delete_imported = False def _import_delayed_delete(): # glance_store (indirectly) imports glance.domain therefore we can't put # the CONF.import_opt outside - we have to do it in a convoluted/indirect # way! global _delayed_delete_imported if not _delayed_delete_imported: CONF.import_opt('delayed_delete', 'glance_store') _delayed_delete_imported = True class ImageFactory(object): _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size'] _reserved_properties = ['owner', 'locations', 'deleted', 'deleted_at', 'direct_url', 'self', 'file', 'schema'] def _check_readonly(self, kwargs): for key in self._readonly_properties: if key in kwargs: raise exception.ReadonlyProperty(property=key) def _check_unexpected(self, kwargs): if kwargs: msg = _('new_image() got unexpected keywords %s') raise TypeError(msg % kwargs.keys()) def _check_reserved(self, properties): if properties is not None: for key in self._reserved_properties: if key in properties: raise exception.ReservedProperty(property=key) def new_image(self, image_id=None, name=None, visibility='shared', min_disk=0, min_ram=0, protected=False, owner=None, disk_format=None, container_format=None, extra_properties=None, tags=None, os_hidden=False, **other_args): extra_properties = extra_properties or {} self._check_readonly(other_args) self._check_unexpected(other_args) self._check_reserved(extra_properties) if image_id is None: image_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at status = 'queued' return Image(image_id=image_id, name=name, status=status, created_at=created_at, updated_at=updated_at, visibility=visibility, min_disk=min_disk, min_ram=min_ram, protected=protected, owner=owner, disk_format=disk_format, container_format=container_format, os_hidden=os_hidden, extra_properties=extra_properties, tags=tags or []) class Image(object): valid_state_targets = { # Each key denotes a "current" state for the image. Corresponding # values list the valid states to which we can jump from that "current" # state. # NOTE(flwang): In v2, we are deprecating the 'killed' status, so it's # allowed to restore image from 'saving' to 'queued' so that upload # can be retried. 
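# For example, per the map below an image in 'saving' may move back to # 'queued' (so an upload can be retried) or on to 'active', but a jump # such as 'saving' -> 'importing' raises InvalidImageStatusTransition # in the status setter.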
'queued': ('saving', 'uploading', 'importing', 'active', 'deleted'), 'saving': ('active', 'killed', 'deleted', 'queued'), 'uploading': ('importing', 'queued', 'deleted'), 'importing': ('active', 'deleted', 'queued'), 'active': ('pending_delete', 'deleted', 'deactivated'), 'killed': ('deleted',), 'pending_delete': ('deleted', 'active'), 'deleted': (), 'deactivated': ('active', 'deleted'), } def __init__(self, image_id, status, created_at, updated_at, **kwargs): self.image_id = image_id self.status = status self.created_at = created_at self.updated_at = updated_at self.name = kwargs.pop('name', None) self.visibility = kwargs.pop('visibility', 'shared') self.os_hidden = kwargs.pop('os_hidden', False) self.min_disk = kwargs.pop('min_disk', 0) self.min_ram = kwargs.pop('min_ram', 0) self.protected = kwargs.pop('protected', False) self.locations = kwargs.pop('locations', []) self.checksum = kwargs.pop('checksum', None) self.os_hash_algo = kwargs.pop('os_hash_algo', None) self.os_hash_value = kwargs.pop('os_hash_value', None) self.owner = kwargs.pop('owner', None) self._disk_format = kwargs.pop('disk_format', None) self._container_format = kwargs.pop('container_format', None) self.size = kwargs.pop('size', None) self.virtual_size = kwargs.pop('virtual_size', None) extra_properties = kwargs.pop('extra_properties', {}) self.extra_properties = ExtraProperties(extra_properties) self.tags = kwargs.pop('tags', []) self.member = kwargs.pop('member', None) if kwargs: message = _("__init__() got unexpected keyword argument '%s'") raise TypeError(message % list(kwargs.keys())[0]) @property def status(self): return self._status @status.setter def status(self, status): has_status = hasattr(self, '_status') if has_status: if status not in self.valid_state_targets[self._status]: kw = {'cur_status': self._status, 'new_status': status} e = exception.InvalidImageStatusTransition(**kw) LOG.debug(e) raise e if self._status in ('queued', 'uploading') and status in ( 'saving', 'active', 'importing'): missing = [k for k in ['disk_format', 'container_format'] if not getattr(self, k)] if len(missing) > 0: if len(missing) == 1: msg = _('Property %s must be set prior to ' 'saving data.') else: msg = _('Properties %s must be set prior to ' 'saving data.') raise ValueError(msg % ', '.join(missing)) # NOTE(flwang): Image size should be cleared as long as the image # status is updated to 'queued' if status == 'queued': self.size = None self.virtual_size = None self._status = status @property def visibility(self): return self._visibility @visibility.setter def visibility(self, visibility): if visibility not in ('community', 'public', 'private', 'shared'): raise ValueError(_('Visibility must be one of "community", ' '"public", "private", or "shared"')) self._visibility = visibility @property def tags(self): return self._tags @tags.setter def tags(self, value): self._tags = set(value) @property def container_format(self): return self._container_format @container_format.setter def container_format(self, value): if (hasattr(self, '_container_format') and self.status not in ('queued', 'importing')): msg = _("Attribute container_format can be only replaced " "for a queued image.") raise exception.Forbidden(message=msg) self._container_format = value @property def disk_format(self): return self._disk_format @disk_format.setter def disk_format(self, value): if (hasattr(self, '_disk_format') and self.status not in ('queued', 'importing')): msg = _("Attribute disk_format can be only replaced " "for a queued image.") raise 
exception.Forbidden(message=msg) self._disk_format = value @property def min_disk(self): return self._min_disk @min_disk.setter def min_disk(self, value): if value and value < 0: extra_msg = _('Cannot be a negative value') raise exception.InvalidParameterValue(value=value, param='min_disk', extra_msg=extra_msg) self._min_disk = value @property def min_ram(self): return self._min_ram @min_ram.setter def min_ram(self, value): if value and value < 0: extra_msg = _('Cannot be a negative value') raise exception.InvalidParameterValue(value=value, param='min_ram', extra_msg=extra_msg) self._min_ram = value def delete(self): if self.protected: raise exception.ProtectedImageDelete(image_id=self.image_id) if CONF.delayed_delete and self.locations: self.status = 'pending_delete' else: self.status = 'deleted' def deactivate(self): if self.status == 'active': self.status = 'deactivated' elif self.status == 'deactivated': # Noop if already deactivate pass else: LOG.debug("Not allowed to deactivate image in status '%s'", self.status) msg = (_("Not allowed to deactivate image in status '%s'") % self.status) raise exception.Forbidden(message=msg) def reactivate(self): if self.status == 'deactivated': self.status = 'active' elif self.status == 'active': # Noop if already active pass else: LOG.debug("Not allowed to reactivate image in status '%s'", self.status) msg = (_("Not allowed to reactivate image in status '%s'") % self.status) raise exception.Forbidden(message=msg) def get_data(self, *args, **kwargs): raise NotImplementedError() def set_data(self, data, size=None, backend=None, set_active=True): raise NotImplementedError() class ExtraProperties(abc.MutableMapping, dict): def __getitem__(self, key): return dict.__getitem__(self, key) def __setitem__(self, key, value): return dict.__setitem__(self, key, value) def __delitem__(self, key): return dict.__delitem__(self, key) def __eq__(self, other): if isinstance(other, ExtraProperties): return dict.__eq__(self, dict(other)) elif isinstance(other, dict): return dict.__eq__(self, other) else: return False def __ne__(self, other): return not self.__eq__(other) def __len__(self): return dict.__len__(self) def keys(self): return dict.keys(self) class ImageMembership(object): def __init__(self, image_id, member_id, created_at, updated_at, id=None, status=None): self.id = id self.image_id = image_id self.member_id = member_id self.created_at = created_at self.updated_at = updated_at self.status = status @property def status(self): return self._status @status.setter def status(self, status): if status not in ('pending', 'accepted', 'rejected'): msg = _('Status must be "pending", "accepted" or "rejected".') raise ValueError(msg) self._status = status class ImageMemberFactory(object): def new_image_member(self, image, member_id): created_at = timeutils.utcnow() updated_at = created_at return ImageMembership(image_id=image.image_id, member_id=member_id, created_at=created_at, updated_at=updated_at, status='pending') class Task(object): _supported_task_type = ('import', 'api_image_import', 'location_import') _supported_task_status = ('pending', 'processing', 'success', 'failure') def __init__(self, task_id, task_type, status, owner, image_id, user_id, request_id, expires_at, created_at, updated_at, task_input, result, message): if task_type not in self._supported_task_type: raise exception.InvalidTaskType(type=task_type) if status not in self._supported_task_status: raise exception.InvalidTaskStatus(status=status) self.task_id = task_id self._status = status 
self.type = task_type self.owner = owner self.expires_at = expires_at # NOTE(nikhil): We use '_time_to_live' to determine how long a # task should live from the time it succeeds or fails. task_time_to_live = CONF.task.task_time_to_live self._time_to_live = datetime.timedelta(hours=task_time_to_live) self.created_at = created_at self.updated_at = updated_at self.task_input = task_input self.result = result self.message = message self.image_id = image_id self.request_id = request_id self.user_id = user_id @property def status(self): return self._status @property def message(self): return self._message @message.setter def message(self, message): if message: self._message = str(message) else: self._message = '' def _validate_task_status_transition(self, cur_status, new_status): valid_transitions = { 'pending': ['processing', 'failure'], 'processing': ['success', 'failure'], 'success': [], 'failure': [], } if new_status in valid_transitions[cur_status]: return True else: return False def _set_task_status(self, new_status): if self._validate_task_status_transition(self.status, new_status): old_status = self.status self._status = new_status LOG.info(_LI("Task [%(task_id)s] status changing from " "%(cur_status)s to %(new_status)s"), {'task_id': self.task_id, 'cur_status': old_status, 'new_status': new_status}) else: LOG.error(_LE("Task [%(task_id)s] status failed to change from " "%(cur_status)s to %(new_status)s"), {'task_id': self.task_id, 'cur_status': self.status, 'new_status': new_status}) raise exception.InvalidTaskStatusTransition( cur_status=self.status, new_status=new_status ) def begin_processing(self): new_status = 'processing' self._set_task_status(new_status) def succeed(self, result): new_status = 'success' self.result = result self._set_task_status(new_status) self.expires_at = timeutils.utcnow() + self._time_to_live def fail(self, message): new_status = 'failure' self.message = message self._set_task_status(new_status) self.expires_at = timeutils.utcnow() + self._time_to_live def run(self, executor): executor.begin_processing(self.task_id) class TaskStub(object): def __init__(self, task_id, task_type, status, owner, expires_at, created_at, updated_at, image_id, user_id, request_id): self.task_id = task_id self._status = status self.type = task_type self.owner = owner self.expires_at = expires_at self.created_at = created_at self.updated_at = updated_at self.image_id = image_id self.request_id = request_id self.user_id = user_id @property def status(self): return self._status class TaskFactory(object): def new_task(self, task_type, owner, image_id, user_id, request_id, task_input=None, **kwargs): task_id = str(uuid.uuid4()) status = 'pending' # Note(nikhil): expires_at would be set on the task, only when it # succeeds or fails. expires_at = None created_at = timeutils.utcnow() updated_at = created_at return Task( task_id, task_type, status, owner, image_id, user_id, request_id, expires_at, created_at, updated_at, task_input, kwargs.get('result'), kwargs.get('message'), ) class TaskExecutorFactory(object): eventlet_deprecation_warned = False def __init__(self, task_repo, image_repo, image_factory, admin_repo=None): self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory self.admin_repo = admin_repo def new_task_executor(self, context): try: # NOTE(flaper87): Backwards compatibility layer. # It'll allow us to provide a deprecation path to # users that are currently consuming the `eventlet` # executor. 
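# For instance, a configured value of 'taskflow' resolves below to # 'glance.async_.taskflow_executor.TaskExecutor' before being imported.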
task_executor = CONF.task.task_executor if task_executor == 'eventlet': # NOTE(jokke): Making sure we do not log the deprecation # warning 1000 times or anything crazy like that. if not TaskExecutorFactory.eventlet_deprecation_warned: msg = _LW("The `eventlet` executor has been deprecated. " "Use `taskflow` instead.") LOG.warning(msg) TaskExecutorFactory.eventlet_deprecation_warned = True task_executor = 'taskflow' executor_cls = ('glance.async_.%s_executor.' 'TaskExecutor' % task_executor) LOG.debug("Loading %s executor", task_executor) executor = importutils.import_class(executor_cls) return executor(context, self.task_repo, self.image_repo, self.image_factory, admin_repo=self.admin_repo) except ImportError: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to load the %s executor provided " "in the config."), CONF.task.task_executor) class MetadefNamespace(object): def __init__(self, namespace_id, namespace, display_name, description, owner, visibility, protected, created_at, updated_at): self.namespace_id = namespace_id self.namespace = namespace self.display_name = display_name self.description = description self.owner = owner self.visibility = visibility or "private" self.protected = protected or False self.created_at = created_at self.updated_at = updated_at def delete(self): if self.protected: raise exception.ProtectedMetadefNamespaceDelete( namespace=self.namespace) class MetadefNamespaceFactory(object): def new_namespace(self, namespace, owner, **kwargs): namespace_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at return MetadefNamespace( namespace_id, namespace, kwargs.get('display_name'), kwargs.get('description'), owner, kwargs.get('visibility'), kwargs.get('protected'), created_at, updated_at ) class MetadefObject(object): def __init__(self, namespace, object_id, name, created_at, updated_at, required, description, properties): self.namespace = namespace self.object_id = object_id self.name = name self.created_at = created_at self.updated_at = updated_at self.required = required self.description = description self.properties = properties def delete(self): if self.namespace.protected: raise exception.ProtectedMetadefObjectDelete(object_name=self.name) class MetadefObjectFactory(object): def new_object(self, namespace, name, **kwargs): object_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at return MetadefObject( namespace, object_id, name, created_at, updated_at, kwargs.get('required'), kwargs.get('description'), kwargs.get('properties') ) class MetadefResourceType(object): def __init__(self, namespace, name, prefix, properties_target, created_at, updated_at): self.namespace = namespace self.name = name self.prefix = prefix self.properties_target = properties_target self.created_at = created_at self.updated_at = updated_at def delete(self): if self.namespace.protected: raise exception.ProtectedMetadefResourceTypeAssociationDelete( resource_type=self.name) class MetadefResourceTypeFactory(object): def new_resource_type(self, namespace, name, **kwargs): created_at = timeutils.utcnow() updated_at = created_at return MetadefResourceType( namespace, name, kwargs.get('prefix'), kwargs.get('properties_target'), created_at, updated_at ) class MetadefProperty(object): def __init__(self, namespace, property_id, name, schema): self.namespace = namespace self.property_id = property_id self.name = name self.schema = schema def delete(self): if self.namespace.protected: raise 
exception.ProtectedMetadefNamespacePropDelete( property_name=self.name) class MetadefPropertyFactory(object): def new_namespace_property(self, namespace, name, schema, **kwargs): property_id = str(uuid.uuid4()) return MetadefProperty( namespace, property_id, name, schema ) class MetadefTag(object): def __init__(self, namespace, tag_id, name, created_at, updated_at): self.namespace = namespace self.tag_id = tag_id self.name = name self.created_at = created_at self.updated_at = updated_at def delete(self): if self.namespace.protected: raise exception.ProtectedMetadefTagDelete(tag_name=self.name) class MetadefTagFactory(object): def new_tag(self, namespace, name, **kwargs): tag_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at return MetadefTag( namespace, tag_id, name, created_at, updated_at ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/domain/proxy.py0000664000175000017500000004744600000000000017330 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def _proxy(target, attr): def get_attr(self): return getattr(getattr(self, target), attr) def set_attr(self, value): return setattr(getattr(self, target), attr, value) def del_attr(self): return delattr(getattr(self, target), attr) return property(get_attr, set_attr, del_attr) class Helper(object): def __init__(self, proxy_class=None, proxy_kwargs=None): self.proxy_class = proxy_class self.proxy_kwargs = proxy_kwargs or {} def proxy(self, obj): if obj is None or self.proxy_class is None: return obj return self.proxy_class(obj, **self.proxy_kwargs) def unproxy(self, obj): if obj is None or self.proxy_class is None: return obj return obj.base class TaskRepo(object): def __init__(self, base, task_proxy_class=None, task_proxy_kwargs=None): self.base = base self.task_proxy_helper = Helper(task_proxy_class, task_proxy_kwargs) def get(self, task_id): task = self.base.get(task_id) return self.task_proxy_helper.proxy(task) def add(self, task): self.base.add(self.task_proxy_helper.unproxy(task)) def save(self, task): self.base.save(self.task_proxy_helper.unproxy(task)) def remove(self, task): base_task = self.task_proxy_helper.unproxy(task) self.base.remove(base_task) class TaskStubRepo(object): def __init__(self, base, task_stub_proxy_class=None, task_stub_proxy_kwargs=None): self.base = base self.task_stub_proxy_helper = Helper(task_stub_proxy_class, task_stub_proxy_kwargs) def list(self, *args, **kwargs): tasks = self.base.list(*args, **kwargs) return [self.task_stub_proxy_helper.proxy(task) for task in tasks] class Repo(object): def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): self.base = base self.helper = Helper(item_proxy_class, item_proxy_kwargs) def get(self, item_id): return self.helper.proxy(self.base.get(item_id)) def list(self, *args, **kwargs): items = self.base.list(*args, **kwargs) return 
[self.helper.proxy(item) for item in items] def add(self, item): base_item = self.helper.unproxy(item) result = self.base.add(base_item) return self.helper.proxy(result) def save(self, item, from_state=None): base_item = self.helper.unproxy(item) result = self.base.save(base_item, from_state=from_state) return self.helper.proxy(result) def remove(self, item): base_item = self.helper.unproxy(item) result = self.base.remove(base_item) return self.helper.proxy(result) def set_property_atomic(self, item, name, value): msg = '%s is only valid for images' % __name__ assert hasattr(item, 'image_id'), msg self.base.set_property_atomic(item, name, value) def delete_property_atomic(self, item, name, value): msg = '%s is only valid for images' % __name__ assert hasattr(item, 'image_id'), msg self.base.delete_property_atomic(item, name, value) class MemberRepo(object): def __init__(self, image, base, member_proxy_class=None, member_proxy_kwargs=None): self.image = image self.base = base self.member_proxy_helper = Helper(member_proxy_class, member_proxy_kwargs) def get(self, member_id): member = self.base.get(member_id) return self.member_proxy_helper.proxy(member) def add(self, member): self.base.add(self.member_proxy_helper.unproxy(member)) def list(self, *args, **kwargs): members = self.base.list(*args, **kwargs) return [self.member_proxy_helper.proxy(member) for member in members] def remove(self, member): base_item = self.member_proxy_helper.unproxy(member) result = self.base.remove(base_item) return self.member_proxy_helper.proxy(result) def save(self, member, from_state=None): base_item = self.member_proxy_helper.unproxy(member) result = self.base.save(base_item, from_state=from_state) return self.member_proxy_helper.proxy(result) class ImageFactory(object): def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base def new_image(self, **kwargs): return self.helper.proxy(self.base.new_image(**kwargs)) class ImageMembershipFactory(object): def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base def new_image_member(self, image, member, **kwargs): return self.helper.proxy(self.base.new_image_member(image, member, **kwargs)) class Image(object): def __init__(self, base, member_repo_proxy_class=None, member_repo_proxy_kwargs=None): self.base = base self.helper = Helper(member_repo_proxy_class, member_repo_proxy_kwargs) name = _proxy('base', 'name') image_id = _proxy('base', 'image_id') status = _proxy('base', 'status') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') visibility = _proxy('base', 'visibility') min_disk = _proxy('base', 'min_disk') min_ram = _proxy('base', 'min_ram') protected = _proxy('base', 'protected') os_hidden = _proxy('base', 'os_hidden') locations = _proxy('base', 'locations') checksum = _proxy('base', 'checksum') os_hash_algo = _proxy('base', 'os_hash_algo') os_hash_value = _proxy('base', 'os_hash_value') owner = _proxy('base', 'owner') disk_format = _proxy('base', 'disk_format') container_format = _proxy('base', 'container_format') size = _proxy('base', 'size') virtual_size = _proxy('base', 'virtual_size') extra_properties = _proxy('base', 'extra_properties') tags = _proxy('base', 'tags') member = _proxy('base', 'member') def delete(self): self.base.delete() def deactivate(self): self.base.deactivate() def reactivate(self): self.base.reactivate() def set_data(self, data, size=None, backend=None, 
set_active=True): self.base.set_data(data, size, backend=backend, set_active=set_active) def get_data(self, *args, **kwargs): return self.base.get_data(*args, **kwargs) class ImageMember(object): def __init__(self, base): self.base = base id = _proxy('base', 'id') image_id = _proxy('base', 'image_id') member_id = _proxy('base', 'member_id') status = _proxy('base', 'status') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') class Task(object): def __init__(self, base): self.base = base task_id = _proxy('base', 'task_id') type = _proxy('base', 'type') status = _proxy('base', 'status') owner = _proxy('base', 'owner') expires_at = _proxy('base', 'expires_at') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') task_input = _proxy('base', 'task_input') result = _proxy('base', 'result') message = _proxy('base', 'message') image_id = _proxy('base', 'image_id') request_id = _proxy('base', 'request_id') user_id = _proxy('base', 'user_id') def begin_processing(self): self.base.begin_processing() def succeed(self, result): self.base.succeed(result) def fail(self, message): self.base.fail(message) def run(self, executor): self.base.run(executor) class TaskStub(object): def __init__(self, base): self.base = base task_id = _proxy('base', 'task_id') type = _proxy('base', 'type') status = _proxy('base', 'status') owner = _proxy('base', 'owner') expires_at = _proxy('base', 'expires_at') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') class TaskFactory(object): def __init__(self, base, task_proxy_class=None, task_proxy_kwargs=None): self.task_helper = Helper(task_proxy_class, task_proxy_kwargs) self.base = base def new_task(self, **kwargs): t = self.base.new_task(**kwargs) return self.task_helper.proxy(t) # Metadef Namespace classes class MetadefNamespaceRepo(object): def __init__(self, base, namespace_proxy_class=None, namespace_proxy_kwargs=None): self.base = base self.namespace_proxy_helper = Helper(namespace_proxy_class, namespace_proxy_kwargs) def get(self, namespace): namespace_obj = self.base.get(namespace) return self.namespace_proxy_helper.proxy(namespace_obj) def add(self, namespace): self.base.add(self.namespace_proxy_helper.unproxy(namespace)) def list(self, *args, **kwargs): namespaces = self.base.list(*args, **kwargs) return [self.namespace_proxy_helper.proxy(namespace) for namespace in namespaces] def remove(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.namespace_proxy_helper.proxy(result) def remove_objects(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove_objects(base_item) return self.namespace_proxy_helper.proxy(result) def remove_properties(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove_properties(base_item) return self.namespace_proxy_helper.proxy(result) def remove_tags(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove_tags(base_item) return self.namespace_proxy_helper.proxy(result) def save(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.namespace_proxy_helper.proxy(result) class MetadefNamespace(object): def __init__(self, base): self.base = base namespace_id = _proxy('base', 'namespace_id') namespace = _proxy('base', 'namespace') display_name = _proxy('base', 'display_name') description = _proxy('base', 'description') 
owner = _proxy('base', 'owner') visibility = _proxy('base', 'visibility') protected = _proxy('base', 'protected') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefNamespaceFactory(object): def __init__(self, base, meta_namespace_proxy_class=None, meta_namespace_proxy_kwargs=None): self.meta_namespace_helper = Helper(meta_namespace_proxy_class, meta_namespace_proxy_kwargs) self.base = base def new_namespace(self, **kwargs): t = self.base.new_namespace(**kwargs) return self.meta_namespace_helper.proxy(t) # Metadef object classes class MetadefObjectRepo(object): def __init__(self, base, object_proxy_class=None, object_proxy_kwargs=None): self.base = base self.object_proxy_helper = Helper(object_proxy_class, object_proxy_kwargs) def get(self, namespace, object_name): meta_object = self.base.get(namespace, object_name) return self.object_proxy_helper.proxy(meta_object) def add(self, meta_object): self.base.add(self.object_proxy_helper.unproxy(meta_object)) def list(self, *args, **kwargs): objects = self.base.list(*args, **kwargs) return [self.object_proxy_helper.proxy(meta_object) for meta_object in objects] def remove(self, item): base_item = self.object_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.object_proxy_helper.proxy(result) def save(self, item): base_item = self.object_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.object_proxy_helper.proxy(result) class MetadefObject(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') object_id = _proxy('base', 'object_id') name = _proxy('base', 'name') required = _proxy('base', 'required') description = _proxy('base', 'description') properties = _proxy('base', 'properties') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefObjectFactory(object): def __init__(self, base, meta_object_proxy_class=None, meta_object_proxy_kwargs=None): self.meta_object_helper = Helper(meta_object_proxy_class, meta_object_proxy_kwargs) self.base = base def new_object(self, **kwargs): t = self.base.new_object(**kwargs) return self.meta_object_helper.proxy(t) # Metadef ResourceType classes class MetadefResourceTypeRepo(object): def __init__(self, base, resource_type_proxy_class=None, resource_type_proxy_kwargs=None): self.base = base self.resource_type_proxy_helper = Helper(resource_type_proxy_class, resource_type_proxy_kwargs) def add(self, meta_resource_type): self.base.add(self.resource_type_proxy_helper.unproxy( meta_resource_type)) def get(self, *args, **kwargs): resource_type = self.base.get(*args, **kwargs) return self.resource_type_proxy_helper.proxy(resource_type) def list(self, *args, **kwargs): resource_types = self.base.list(*args, **kwargs) return [self.resource_type_proxy_helper.proxy(resource_type) for resource_type in resource_types] def remove(self, item): base_item = self.resource_type_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.resource_type_proxy_helper.proxy(result) class MetadefResourceType(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') name = _proxy('base', 'name') prefix = _proxy('base', 'prefix') properties_target = _proxy('base', 'properties_target') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefResourceTypeFactory(object): 
def __init__(self, base, resource_type_proxy_class=None, resource_type_proxy_kwargs=None): self.resource_type_helper = Helper(resource_type_proxy_class, resource_type_proxy_kwargs) self.base = base def new_resource_type(self, **kwargs): t = self.base.new_resource_type(**kwargs) return self.resource_type_helper.proxy(t) # Metadef namespace property classes class MetadefPropertyRepo(object): def __init__(self, base, property_proxy_class=None, property_proxy_kwargs=None): self.base = base self.property_proxy_helper = Helper(property_proxy_class, property_proxy_kwargs) def get(self, namespace, property_name): property = self.base.get(namespace, property_name) return self.property_proxy_helper.proxy(property) def add(self, property): self.base.add(self.property_proxy_helper.unproxy(property)) def list(self, *args, **kwargs): properties = self.base.list(*args, **kwargs) return [self.property_proxy_helper.proxy(property) for property in properties] def remove(self, item): base_item = self.property_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.property_proxy_helper.proxy(result) def save(self, item): base_item = self.property_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.property_proxy_helper.proxy(result) class MetadefProperty(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') property_id = _proxy('base', 'property_id') name = _proxy('base', 'name') schema = _proxy('base', 'schema') def delete(self): self.base.delete() class MetadefPropertyFactory(object): def __init__(self, base, property_proxy_class=None, property_proxy_kwargs=None): self.meta_object_helper = Helper(property_proxy_class, property_proxy_kwargs) self.base = base def new_namespace_property(self, **kwargs): t = self.base.new_namespace_property(**kwargs) return self.meta_object_helper.proxy(t) # Metadef tag classes class MetadefTagRepo(object): def __init__(self, base, tag_proxy_class=None, tag_proxy_kwargs=None): self.base = base self.tag_proxy_helper = Helper(tag_proxy_class, tag_proxy_kwargs) def get(self, namespace, name): meta_tag = self.base.get(namespace, name) return self.tag_proxy_helper.proxy(meta_tag) def add(self, meta_tag): self.base.add(self.tag_proxy_helper.unproxy(meta_tag)) def add_tags(self, meta_tags, can_append=False): tags_list = [] for meta_tag in meta_tags: tags_list.append(self.tag_proxy_helper.unproxy(meta_tag)) self.base.add_tags(tags_list, can_append) def list(self, *args, **kwargs): tags = self.base.list(*args, **kwargs) return [self.tag_proxy_helper.proxy(meta_tag) for meta_tag in tags] def remove(self, item): base_item = self.tag_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.tag_proxy_helper.proxy(result) def save(self, item): base_item = self.tag_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.tag_proxy_helper.proxy(result) class MetadefTag(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') tag_id = _proxy('base', 'tag_id') name = _proxy('base', 'name') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefTagFactory(object): def __init__(self, base, meta_tag_proxy_class=None, meta_tag_proxy_kwargs=None): self.meta_tag_helper = Helper(meta_tag_proxy_class, meta_tag_proxy_kwargs) self.base = base def new_tag(self, **kwargs): t = self.base.new_tag(**kwargs) return self.meta_tag_helper.proxy(t) 
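# A minimal, self-contained sketch of the wrap/unwrap contract that the
# factories and repos above rely on. Everything below (MiniHelper, Item,
# Shout, ItemRepo, ShoutRepoProxy) is a hypothetical stand-in written for
# illustration only; it mirrors the behaviour of the Helper class used
# above but is not part of Glance itself.
class MiniHelper(object):
    def __init__(self, proxy_class=None, proxy_kwargs=None):
        self.proxy_class = proxy_class
        self.proxy_kwargs = proxy_kwargs or {}

    def proxy(self, obj):
        # Wrap an object coming out of the base repo.
        if obj is not None and self.proxy_class is not None:
            return self.proxy_class(obj, **self.proxy_kwargs)
        return obj

    def unproxy(self, obj):
        # Recover the bare object before handing it to the base repo.
        if obj is not None and self.proxy_class is not None:
            return obj.base
        return obj


class Item(object):
    def __init__(self, name):
        self.name = name


class Shout(object):
    # Item-level proxy: exposes the wrapped item's name in upper case.
    def __init__(self, base):
        self.base = base

    @property
    def name(self):
        return self.base.name.upper()


class ItemRepo(object):
    # Trivial "base" repo backed by a list.
    def __init__(self):
        self._items = []

    def add(self, item):
        self._items.append(item)

    def list(self):
        return list(self._items)


class ShoutRepoProxy(object):
    # Repo-level proxy: unproxy on the way in, proxy on the way out,
    # just like MetadefNamespaceRepo and friends above.
    def __init__(self, base):
        self.helper = MiniHelper(Shout)
        self.base = base

    def add(self, item):
        self.base.add(self.helper.unproxy(item))

    def list(self):
        return [self.helper.proxy(i) for i in self.base.list()]


if __name__ == '__main__':
    repo = ShoutRepoProxy(ItemRepo())
    repo.add(Shout(Item('cirros')))  # stored unwrapped as a bare Item
    assert repo.list()[0].name == 'CIRROS'  # wrapped again on the way out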
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/gateway.py0000664000175000017500000002115600000000000016327 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from glance.api import policy from glance.api import property_protections from glance.common import property_utils from glance.common import store_utils import glance.db import glance.domain import glance.location import glance.notifier import glance.quota class Gateway(object): def __init__(self, db_api=None, store_api=None, notifier=None, policy_enforcer=None): self.db_api = db_api or glance.db.get_api() self.store_api = store_api or glance_store self.store_utils = store_utils self.notifier = notifier or glance.notifier.Notifier() self.policy = policy_enforcer or policy.Enforcer() def get_image_factory(self, context): factory = glance.domain.ImageFactory() factory = glance.location.ImageFactoryProxy( factory, context, self.store_api, self.store_utils) factory = glance.quota.ImageFactoryProxy( factory, context, self.db_api, self.store_utils) factory = glance.notifier.ImageFactoryProxy( factory, context, self.notifier) if property_utils.is_property_protection_enabled(): property_rules = property_utils.PropertyRules(self.policy) factory = property_protections.ProtectedImageFactoryProxy( factory, context, property_rules) return factory def get_image_member_factory(self, context): factory = glance.domain.ImageMemberFactory() factory = glance.quota.ImageMemberFactoryProxy( factory, context, self.db_api, self.store_utils) return factory def get_repo(self, context): """Get the layered ImageRepo model. This is where we construct the "onion" by layering ImageRepo models on top of each other, starting with the DB at the bottom. 
:param context: The RequestContext :returns: An ImageRepo-like object """ repo = glance.db.ImageRepo(context, self.db_api) repo = glance.location.ImageRepoProxy( repo, context, self.store_api, self.store_utils) repo = glance.quota.ImageRepoProxy( repo, context, self.db_api, self.store_utils) repo = glance.notifier.ImageRepoProxy( repo, context, self.notifier) if property_utils.is_property_protection_enabled(): property_rules = property_utils.PropertyRules(self.policy) repo = property_protections.ProtectedImageRepoProxy( repo, context, property_rules) return repo def get_member_repo(self, image, context): repo = glance.db.ImageMemberRepo( context, self.db_api, image) repo = glance.location.ImageMemberRepoProxy( repo, image, context, self.store_api) repo = glance.notifier.ImageMemberRepoProxy( repo, image, context, self.notifier) return repo def get_task_factory(self, context): factory = glance.domain.TaskFactory() factory = glance.notifier.TaskFactoryProxy( factory, context, self.notifier) return factory def get_task_repo(self, context): repo = glance.db.TaskRepo(context, self.db_api) repo = glance.notifier.TaskRepoProxy( repo, context, self.notifier) return repo def get_task_stub_repo(self, context): repo = glance.db.TaskRepo(context, self.db_api) repo = glance.notifier.TaskStubRepoProxy( repo, context, self.notifier) return repo def get_task_executor_factory(self, context, admin_context=None): task_repo = self.get_task_repo(context) image_repo = self.get_repo(context) image_factory = self.get_image_factory(context) if admin_context: admin_repo = self.get_repo(admin_context) else: admin_repo = None return glance.domain.TaskExecutorFactory(task_repo, image_repo, image_factory, admin_repo=admin_repo) def get_metadef_namespace_factory(self, context): factory = glance.domain.MetadefNamespaceFactory() factory = glance.notifier.MetadefNamespaceFactoryProxy( factory, context, self.notifier) return factory def get_metadef_namespace_repo(self, context): """Get the layered NamespaceRepo model. This is where we construct the "onion" by layering NamespaceRepo models on top of each other, starting with the DB at the bottom. :param context: The RequestContext :returns: A NamespaceRepo-like object """ repo = glance.db.MetadefNamespaceRepo(context, self.db_api) repo = glance.notifier.MetadefNamespaceRepoProxy( repo, context, self.notifier) return repo def get_metadef_object_factory(self, context): factory = glance.domain.MetadefObjectFactory() factory = glance.notifier.MetadefObjectFactoryProxy( factory, context, self.notifier) return factory def get_metadef_object_repo(self, context): """Get the layered MetadefObjectRepo model. This is where we construct the "onion" by layering MetadefObjectRepo models on top of each other, starting with the DB at the bottom. :param context: The RequestContext :returns: A MetadefObjectRepo-like object """ repo = glance.db.MetadefObjectRepo(context, self.db_api) repo = glance.notifier.MetadefObjectRepoProxy( repo, context, self.notifier) return repo def get_metadef_resource_type_factory(self, context): factory = glance.domain.MetadefResourceTypeFactory() factory = glance.notifier.MetadefResourceTypeFactoryProxy( factory, context, self.notifier) return factory def get_metadef_resource_type_repo(self, context): """Get the layered MetadefResourceTypeRepo model. This is where we construct the "onion" by layering MetadefResourceTypeRepo models on top of each other, starting with the DB at the bottom. 
:param context: The RequestContext :returns: A MetadefResourceTypeRepo-like object """ repo = glance.db.MetadefResourceTypeRepo( context, self.db_api) repo = glance.notifier.MetadefResourceTypeRepoProxy( repo, context, self.notifier) return repo def get_metadef_property_factory(self, context): factory = glance.domain.MetadefPropertyFactory() factory = glance.notifier.MetadefPropertyFactoryProxy( factory, context, self.notifier) return factory def get_metadef_property_repo(self, context): """Get the layered MetadefPropertyRepo model. This is where we construct the "onion" by layering MetadefPropertyRepo models on top of each other, starting with the DB at the bottom. :param context: The RequestContext :returns: A MetadefPropertyRepo-like object """ repo = glance.db.MetadefPropertyRepo(context, self.db_api) repo = glance.notifier.MetadefPropertyRepoProxy( repo, context, self.notifier) return repo def get_metadef_tag_factory(self, context): factory = glance.domain.MetadefTagFactory() factory = glance.notifier.MetadefTagFactoryProxy( factory, context, self.notifier) return factory def get_metadef_tag_repo(self, context): """Get the layered MetadefTagRepo model. This is where we construct the "onion" by layering MetadefTagRepo models on top of each other, starting with the DB at the bottom. :param context: The RequestContext :returns: A MetadefTagRepo-like object """ repo = glance.db.MetadefTagRepo(context, self.db_api) repo = glance.notifier.MetadefTagRepoProxy( repo, context, self.notifier) return repo ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.874305 glance-29.0.0/glance/hacking/0000775000175000017500000000000000000000000015713 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/hacking/__init__.py0000664000175000017500000000000000000000000020012 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/hacking/checks.py0000664000175000017500000001003100000000000017520 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from hacking import core """ Guidelines for writing new hacking checks - Use only for Glance-specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range G3xx. Find the current test with the highest allocated number and then pick the next value. If nova has an N3xx code for that test, use the same number. - Keep the test method code in the source file ordered based on the G3xx value. 
- List the new rule in the top level HACKING.rst file - Add test cases for each new rule to glance/tests/test_hacking.py """ asse_trueinst_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " r"(\w|\.|\'|\"|\[|\])+\)\)") asse_equal_type_re = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " r"(\w|\.|\'|\"|\[|\])+\)") asse_equal_end_with_none_re = re.compile( r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)") asse_equal_start_with_none_re = re.compile( r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)") unicode_func_re = re.compile(r"(\s|\W|^)unicode\(") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") @core.flake8ext def assert_true_instance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences G316 """ if asse_trueinst_re.match(logical_line): yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed") @core.flake8ext def assert_equal_type(logical_line): """Check for assertEqual(type(A), B) sentences G317 """ if asse_equal_type_re.match(logical_line): yield (0, "G317: assertEqual(type(A), B) sentences not allowed") @core.flake8ext def assert_equal_none(logical_line): """Check for assertEqual(A, None) or assertEqual(None, A) sentences G318 """ res = (asse_equal_start_with_none_re.match(logical_line) or asse_equal_end_with_none_re.match(logical_line)) if res: yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) " "sentences not allowed") @core.flake8ext def no_translate_debug_logs(logical_line, filename): dirs = [ "glance/api", "glance/cmd", "glance/common", "glance/db", "glance/domain", "glance/image_cache", "glance/quota", "glance/store", "glance/tests", ] if max([name in filename for name in dirs]): if logical_line.startswith("LOG.debug(_("): yield (0, "G319: Don't translate debug level logs") @core.flake8ext def check_no_contextlib_nested(logical_line): msg = ("G327: contextlib.nested is deprecated since Python 2.7. See " "https://docs.python.org/2/library/contextlib.html#contextlib." "nested for more information.") if ("with contextlib.nested(" in logical_line or "with nested(" in logical_line): yield (0, msg) @core.flake8ext def dict_constructor_with_list_copy(logical_line): msg = ("G328: Must use a dict comprehension instead of a dict constructor " "with a sequence of key-value pairs.") if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' Use LOG.warning() instead of Deprecated LOG.warn(). https://docs.python.org/3/library/logging.html#logging.warning """ msg = ("G330: LOG.warn is deprecated, please use LOG.warning!") if "LOG.warn(" in logical_line: yield (0, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/housekeeping.py0000664000175000017500000001064700000000000017357 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from glance.common import exception from glance.common import store_utils from glance import context from glance.i18n import _LE LOG = logging.getLogger(__name__) CONF = cfg.CONF def staging_store_path(): """Return the local path to the staging store. :raises: GlanceException if staging store is not configured to be a file:// URI """ if CONF.enabled_backends: separator, staging_dir = store_utils.get_dir_separator() else: staging_dir = CONF.node_staging_uri expected_prefix = 'file://' if not staging_dir.startswith(expected_prefix): raise exception.GlanceException( 'Unexpected scheme in staging store; ' 'unable to scan for residue') return staging_dir[len(expected_prefix):] class StagingStoreCleaner: def __init__(self, db): self.db = db self.context = context.get_admin_context() @staticmethod def get_image_id(filename): if '.' in filename: filename, ext = filename.split('.', 1) if uuidutils.is_uuid_like(filename): return filename def is_valid_image(self, image_id): try: image = self.db.image_get(self.context, image_id) # FIXME(danms): Maybe check that it's not deleted or # something else like state, size, etc return not image['deleted'] except exception.ImageNotFound: return False @staticmethod def delete_file(path): try: os.remove(path) except FileNotFoundError: # NOTE(danms): We must have raced with something else, so this # is not a problem pass except Exception as e: LOG.error(_LE('Failed to delete stale staging ' 'path %(path)r: %(err)s'), {'path': path, 'err': str(e)}) return False return True def clean_orphaned_staging_residue(self): try: files = os.listdir(staging_store_path()) except FileNotFoundError: # NOTE(danms): If we cannot list the staging dir, there is # clearly nothing left from a previous run, so nothing to # clean up. files = [] if not files: return LOG.debug('Found %i files in staging directory for potential cleanup', len(files)) cleaned = ignored = error = 0 for filename in files: image_id = self.get_image_id(filename) if not image_id: # NOTE(danms): We should probably either have a config option # that decides what to do here (i.e. reap or ignore), or decide # that this is not okay and just nuke anything we find. LOG.debug('Staging directory contains unexpected non-image ' 'file %r; ignoring', filename) ignored += 1 continue if self.is_valid_image(image_id): # NOTE(danms): We found a non-deleted image for this # file, so leave it in place. ignored += 1 continue path = os.path.join(staging_store_path(), filename) LOG.debug('Stale staging residue found for image ' '%(uuid)s: %(file)r; deleting now.', {'uuid': image_id, 'file': path}) if self.delete_file(path): cleaned += 1 else: error += 1 LOG.debug('Cleaned %(cleaned)i stale staging files, ' '%(ignored)i ignored (%(error)i errors)', {'cleaned': cleaned, 'ignored': ignored, 'error': error}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/i18n.py0000664000175000017500000000250700000000000015444 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n as i18n DOMAIN = 'glance' _translators = i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def enable_lazy(enable=True): return i18n.enable_lazy(enable) def translate(value, user_locale=None): return i18n.translate(value, user_locale) def get_available_languages(domain=DOMAIN): return i18n.get_available_languages(domain) # The i18n log translation functions are deprecated. Since removing the # invocations requires a lot of reviewing effort, they have been kept as # no-op functions. def _LI(msg): return msg def _LW(msg): return msg def _LE(msg): return msg def _LC(msg): return msg ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.874305 glance-29.0.0/glance/image_cache/0000775000175000017500000000000000000000000016514 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/__init__.py0000664000175000017500000004071100000000000020630 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LRU Cache for Image Data """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import importutils from oslo_utils.secretutils import md5 from oslo_utils import units from glance.common import exception from glance.common import utils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) image_cache_opts = [ cfg.StrOpt('image_cache_driver', default='centralized_db', choices=('centralized_db', 'sqlite', 'xattr'), ignore_case=True, help=_(""" The driver to use for image cache management. This configuration option provides the flexibility to choose between the different image-cache drivers available. An image-cache driver is responsible for providing the essential functions of the image cache, such as writing images to and reading images from the cache, tracking the age and usage of cached images, providing a list of cached images, fetching the size of the cache, queueing images for caching, and cleaning up the cache. The essential functions of a driver are defined in the base class ``glance.image_cache.drivers.base.Driver``. All image-cache drivers (existing and prospective) must implement this interface. Currently available drivers are ``centralized_db``, ``sqlite`` and ``xattr``. 
These drivers primarily differ in the way they store the information about cached images: * The ``centralized_db`` driver uses a central database (which will be common for all glance nodes) to track the usage of cached images. * The ``sqlite`` (deprecated) driver uses a sqlite database (which sits on every glance node locally) to track the usage of cached images. * The ``xattr`` driver uses the extended attributes of files to store this information. It also requires a filesystem that sets ``atime`` on the files when accessed. Deprecation warning: * As the centralized database will now be used for image cache management, the use of the `sqlite` database and driver will be dropped in the 'E' (2025.1) development cycle. Possible values: * centralized_db * sqlite * xattr Related options: * None """)), cfg.IntOpt('image_cache_max_size', default=10 * units.Gi, # 10 GB min=0, help=_(""" The upper limit on cache size, in bytes, after which the cache-pruner cleans up the image cache. NOTE: This is just a threshold for the cache-pruner to act upon. It is NOT a hard limit beyond which the image cache would never grow. In fact, depending on how often the cache-pruner runs and how quickly the cache fills, the image cache can far exceed the size specified here very easily. Hence, care must be taken in appropriately scheduling the cache-pruner and in setting this limit. Glance caches an image when it is downloaded. Consequently, the size of the image cache grows over time as the number of downloads increases. To keep the cache size from becoming unmanageable, it is recommended to run the cache-pruner as a periodic task. When the cache-pruner is kicked off, it compares the current size of the image cache against this limit and triggers a cleanup if the image cache grew beyond it. After the cleanup, the size of the cache is less than or equal to the size specified here. Possible values: * Any non-negative integer Related options: * None """)), cfg.IntOpt('image_cache_stall_time', default=86400, # 24 hours min=0, help=_(""" The amount of time, in seconds, an incomplete image remains in the cache. Incomplete images are images for which download is in progress. Please see the description of configuration option ``image_cache_dir`` for more detail. Sometimes, due to various reasons, it is possible the download may hang and the incompletely downloaded image remains in the ``incomplete`` directory. This configuration option sets a time limit on how long the incomplete images should remain in the ``incomplete`` directory before they are cleaned up. Once an incomplete image spends more time than is specified here, it'll be removed by the cache-cleaner on its next run. It is recommended to run the cache-cleaner as a periodic task on the Glance API nodes to keep the incomplete images from occupying disk space. Possible values: * Any non-negative integer Related options: * None """)), cfg.StrOpt('image_cache_dir', help=_(""" Base directory for image cache. This is the location where image data is cached and served out of. All cached images are stored directly under this directory. This directory also contains three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``. The ``incomplete`` subdirectory is the staging area for downloading images. An image is first downloaded to this directory. When the image download is successful, it is moved to the base directory. However, if the download fails, the partially downloaded image file is moved to the ``invalid`` subdirectory. The ``queue`` subdirectory is used for queuing images for download. 
This is used primarily by the cache-prefetcher, which can be scheduled as a periodic task like cache-pruner and cache-cleaner, to cache images ahead of their usage. Upon receiving the request to cache an image, Glance touches a file in the ``queue`` directory with the image id as the file name. The cache-prefetcher, when running, polls for the files in ``queue`` directory and starts downloading them in the order they were created. When the download is successful, the zero-sized file is deleted from the ``queue`` directory. If the download fails, the zero-sized file remains and it'll be retried the next time cache-prefetcher runs. Possible values: * A valid path Related options: * ``image_cache_sqlite_db`` """)), ] CONF = cfg.CONF CONF.register_opts(image_cache_opts) class ImageCache(object): """Provides an LRU cache for image data.""" def __init__(self): self.init_driver() def init_driver(self): """ Create the driver for the cache """ driver_name = CONF.image_cache_driver driver_module = (__name__ + '.drivers.' + driver_name + '.Driver') try: self.driver_class = importutils.import_class(driver_module) LOG.info(_LI("Image cache loaded driver '%s'."), driver_name) except ImportError as import_err: LOG.warning(_LW("Image cache driver " "'%(driver_name)s' failed to load. " "Got error: '%(import_err)s."), {'driver_name': driver_name, 'import_err': import_err}) driver_module = __name__ + '.drivers.sqlite.Driver' LOG.info(_LI("Defaulting to SQLite driver.")) self.driver_class = importutils.import_class(driver_module) self.configure_driver() def configure_driver(self): """ Configure the driver for the cache and, if it fails to configure, fall back to using the SQLite driver which has no odd dependencies """ try: self.driver = self.driver_class() self.driver.configure() except exception.BadDriverConfiguration as config_err: driver_module = self.driver_class.__module__ LOG.warning(_LW("Image cache driver " "'%(driver_module)s' failed to configure. " "Got error: '%(config_err)s"), {'driver_module': driver_module, 'config_err': config_err}) LOG.info(_LI("Defaulting to SQLite driver.")) default_module = __name__ + '.drivers.sqlite.Driver' self.driver_class = importutils.import_class(default_module) self.driver = self.driver_class() self.driver.configure() def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return self.driver.is_cached(image_id) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. :param image_id: Image ID """ return self.driver.is_queued(image_id) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ return self.driver.get_cache_size() def get_hit_count(self, image_id): """ Return the number of hits that an image has :param image_id: Opaque image identifier """ return self.driver.get_hit_count(image_id) def get_cached_images(self): """ Returns a list of records about cached images. """ return self.driver.get_cached_images() def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images and returns the number of cached image files that were deleted. 
""" return self.driver.delete_all_cached_images() def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ self.driver.delete_cached_image(image_id) def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images and returns the number of queued image files that were deleted. """ return self.driver.delete_all_queued_images() def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ self.driver.delete_queued_image(image_id) def prune(self): """ Removes all cached image files above the cache's maximum size. Returns a tuple containing the total number of cached files removed and the total size of all pruned image files. """ max_size = CONF.image_cache_max_size current_size = self.driver.get_cache_size() if max_size > current_size: LOG.debug("Image cache has free space, skipping prune...") return (0, 0) overage = current_size - max_size LOG.debug("Image cache currently %(overage)d bytes over max " "size. Starting prune to max size of %(max_size)d ", {'overage': overage, 'max_size': max_size}) total_bytes_pruned = 0 total_files_pruned = 0 entry = self.driver.get_least_recently_accessed() while entry and current_size > max_size: image_id, size = entry LOG.debug("Pruning '%(image_id)s' to free %(size)d bytes", {'image_id': image_id, 'size': size}) self.driver.delete_cached_image(image_id) total_bytes_pruned = total_bytes_pruned + size total_files_pruned = total_files_pruned + 1 current_size = current_size - size entry = self.driver.get_least_recently_accessed() LOG.debug("Pruning finished pruning. " "Pruned %(total_files_pruned)d and " "%(total_bytes_pruned)d.", {'total_files_pruned': total_files_pruned, 'total_bytes_pruned': total_bytes_pruned}) return total_files_pruned, total_bytes_pruned def clean(self, stall_time=None): """ Cleans up any invalid or incomplete cached images. The cache driver decides what that means... """ self.driver.clean(stall_time) def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ return self.driver.queue_image(image_id) def get_caching_iter(self, image_id, image_checksum, image_iter): """ Returns an iterator that caches the contents of an image while the image contents are read through the supplied iterator. :param image_id: Image ID :param image_checksum: checksum expected to be generated while iterating over image data :param image_iter: Iterator that will read image contents """ if not self.driver.is_cacheable(image_id): return image_iter LOG.debug("Tee'ing image '%s' into cache", image_id) return self.cache_tee_iter(image_id, image_iter, image_checksum) def cache_tee_iter(self, image_id, image_iter, image_checksum): try: current_checksum = md5(usedforsecurity=False) with self.driver.open_for_write(image_id) as cache_file: for chunk in image_iter: try: cache_file.write(chunk) finally: current_checksum.update(chunk) yield chunk cache_file.flush() if (image_checksum and image_checksum != current_checksum.hexdigest()): msg = _("Checksum verification failed. 
Aborted " "caching of image '%s'.") % image_id raise exception.GlanceException(msg) except exception.GlanceException as e: with excutils.save_and_reraise_exception(): # image_iter has given us bad, (size_checked_iter has found a # bad length), or corrupt data (checksum is wrong). LOG.exception(encodeutils.exception_to_unicode(e)) except Exception as e: LOG.exception(_LE("Exception encountered while tee'ing " "image '%(image_id)s' into cache: %(error)s. " "Continuing with response."), {'image_id': image_id, 'error': encodeutils.exception_to_unicode(e)}) # If no checksum provided continue responding even if # caching failed. for chunk in image_iter: yield chunk def cache_image_iter(self, image_id, image_iter, image_checksum=None): """ Cache an image with supplied iterator. :param image_id: Image ID :param image_file: Iterator retrieving image chunks :param image_checksum: Checksum of image :returns: True if image file was cached, False otherwise """ if not self.driver.is_cacheable(image_id): return False for chunk in self.get_caching_iter(image_id, image_checksum, image_iter): pass return True def cache_image_file(self, image_id, image_file): """ Cache an image file. :param image_id: Image ID :param image_file: Image file to cache :returns: True if image file was cached, False otherwise """ CHUNKSIZE = 64 * units.Mi return self.cache_image_iter(image_id, utils.chunkiter(image_file, CHUNKSIZE)) def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :note Upon successful reading of the image file, the image's hit count will be incremented. :param image_id: Image ID """ return self.driver.open_for_read(image_id) def get_image_size(self, image_id): """ Return the size of the image file for an image with supplied identifier. :param image_id: Image ID """ return self.driver.get_image_size(image_id) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ return self.driver.get_queued_images() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/base.py0000664000175000017500000000133500000000000020002 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.image_cache import ImageCache class CacheApp(object): def __init__(self): self.cache = ImageCache() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/cleaner.py0000664000175000017500000000143500000000000020502 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cleans up any invalid cache entries """ from glance.image_cache import base class Cleaner(base.CacheApp): def run(self): self.cache.clean() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/client.py0000664000175000017500000001052100000000000020343 0ustar00zuulzuul00000000000000# Copyright 2018 RedHat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_serialization import jsonutils as json from glance.common import client as base_client from glance.common import exception from glance.i18n import _ class CacheClient(base_client.BaseClient): DEFAULT_PORT = 9292 DEFAULT_DOC_ROOT = '/v2' def delete_cached_image(self, image_id): """ Delete a specified image from the cache """ self.do_request("DELETE", "/cached_images/%s" % image_id) return True def get_cached_images(self, **kwargs): """ Returns a list of images stored in the image cache. """ res = self.do_request("GET", "/cached_images") data = json.loads(res.read())['cached_images'] return data def get_queued_images(self, **kwargs): """ Returns a list of images queued for caching """ res = self.do_request("GET", "/queued_images") data = json.loads(res.read())['queued_images'] return data def delete_all_cached_images(self): """ Delete all cached images """ res = self.do_request("DELETE", "/cached_images") data = json.loads(res.read()) num_deleted = data['num_deleted'] return num_deleted def queue_image_for_caching(self, image_id): """ Queue an image for prefetching into cache """ self.do_request("PUT", "/queued_images/%s" % image_id) return True def delete_queued_image(self, image_id): """ Delete a specified image from the cache queue """ self.do_request("DELETE", "/queued_images/%s" % image_id) return True def delete_all_queued_images(self): """ Delete all queued images """ res = self.do_request("DELETE", "/queued_images") data = json.loads(res.read()) num_deleted = data['num_deleted'] return num_deleted def get_client(host, port=None, timeout=None, use_ssl=False, username=None, password=None, project=None, user_domain_id=None, project_domain_id=None, auth_url=None, auth_strategy=None, auth_token=None, region=None, insecure=False): """ Returns a new client Glance client object based on common kwargs. If an option isn't specified falls back to common environment variable defaults. 
""" if auth_url or os.getenv('OS_AUTH_URL'): force_strategy = 'keystone' else: force_strategy = None creds = { 'username': username or os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')), 'password': password or os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')), 'project': project or os.getenv('OS_AUTH_PROJECT', os.getenv('OS_PROJECT_NAME')), 'auth_url': auth_url or os.getenv('OS_AUTH_URL'), 'strategy': force_strategy or auth_strategy or os.getenv('OS_AUTH_STRATEGY', 'noauth'), 'region': region or os.getenv('OS_REGION_NAME'), 'user_domain_id': user_domain_id or os.getenv( 'OS_USER_DOMAIN_ID', 'default'), 'project_domain_id': project_domain_id or os.getenv( 'OS_PROJECT_DOMAIN_ID', 'default') } if creds['strategy'] == 'keystone' and not creds['auth_url']: msg = _("--os_auth_url option or OS_AUTH_URL environment variable " "required when keystone authentication strategy is enabled\n") raise exception.ClientConfigurationError(msg) return CacheClient( host=host, port=port, timeout=timeout, use_ssl=use_ssl, auth_token=auth_token or os.getenv('OS_TOKEN'), creds=creds, insecure=insecure, configure_via_auth=False) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.874305 glance-29.0.0/glance/image_cache/drivers/0000775000175000017500000000000000000000000020172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/drivers/__init__.py0000664000175000017500000000000000000000000022271 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/drivers/base.py0000664000175000017500000001464600000000000021471 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base attribute driver class """ import os.path from oslo_config import cfg from oslo_log import log as logging from glance.common import exception from glance.common import utils from glance.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class Driver(object): def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ # Here we set up the various file-based image cache paths # that we need in order to find the files in different states # of cache management. 
self.set_paths() def set_paths(self): """ Creates all necessary directories under the base cache directory """ self.base_dir = CONF.image_cache_dir if self.base_dir is None: msg = _('Failed to read %s from config') % 'image_cache_dir' LOG.error(msg) driver = self.__class__.__module__ raise exception.BadDriverConfiguration(driver_name=driver, reason=msg) self.incomplete_dir = os.path.join(self.base_dir, 'incomplete') self.invalid_dir = os.path.join(self.base_dir, 'invalid') self.queue_dir = os.path.join(self.base_dir, 'queue') dirs = [self.incomplete_dir, self.invalid_dir, self.queue_dir] for path in dirs: utils.safe_mkdirs(path) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ raise NotImplementedError def get_cached_images(self): """ Returns a list of records about cached images. The list of records shall be ordered by image ID and shall look like:: [ { 'image_id': , 'hits': INTEGER, 'last_modified': ISO_TIMESTAMP, 'last_accessed': ISO_TIMESTAMP, 'size': INTEGER }, ... ] """ raise NotImplementedError def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ raise NotImplementedError def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ raise NotImplementedError def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. :param image_id: Image ID """ raise NotImplementedError def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images and returns the number of cached image files that were deleted. """ raise NotImplementedError def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ raise NotImplementedError def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images and returns the number of queued image files that were deleted. """ raise NotImplementedError def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ raise NotImplementedError def queue_image(self, image_id): """ Puts an image identifier in a queue for caching. Return True on successful add to the queue, False otherwise... :param image_id: Image ID """ def clean(self, stall_time=None): """ Dependent on the driver, clean up and destroy any invalid or incomplete cached images """ raise NotImplementedError def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ raise NotImplementedError def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. :param image_id: Image ID """ raise NotImplementedError def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. 
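Implementations are expected to increment the image's hit count upon a successful read (see ImageCache.open_for_read above). 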
:param image_id: Image ID """ raise NotImplementedError def get_image_filepath(self, image_id, cache_status='active'): """ This crafts an absolute path to a specific entry :param image_id: Image ID :param cache_status: Status of the image in the cache """ if cache_status == 'active': return os.path.join(self.base_dir, str(image_id)) return os.path.join(self.base_dir, cache_status, str(image_id)) def get_image_size(self, image_id): """ Return the size of the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) return os.path.getsize(path) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/drivers/centralized_db.py0000664000175000017500000003146300000000000023524 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cache driver that uses Centralized database of glance to store information about cached images """ from contextlib import contextmanager import os import stat import time from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import fileutils from glance.common import exception from glance import context import glance.db from glance.i18n import _LI, _LW from glance.image_cache.drivers import base LOG = logging.getLogger(__name__) CONF = cfg.CONF class Driver(base.Driver): """ Cache driver that uses centralized database to store cache information. """ def __init__(self): self.context = context.get_admin_context() self.db_api = glance.db.get_api() def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ super(Driver, self).configure() lockutils.set_defaults(self.base_dir) # NOTE(abhishekk): Record the node reference in the database for # future use. node_reference_url = CONF.worker_self_reference_url if node_reference_url: try: self.db_api.node_reference_create( self.context, node_reference_url) except exception.Duplicate: LOG.debug("Node reference is already recorded, ignoring it") def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ sizes = [] for path in self.get_cache_files(self.base_dir): file_info = os.stat(path) sizes.append(file_info[stat.ST_SIZE]) return sum(sizes) def get_hit_count(self, image_id): """ Return the number of hits that an image has. 
:param image_id: Opaque image identifier """ if not self.is_cached(image_id): return 0 node_reference_url = CONF.worker_self_reference_url return self.db_api.get_hit_count(self.context, image_id, node_reference_url) def get_cached_images(self): """ Returns a list of records about cached images. """ LOG.debug("Gathering cached image entries.") node_reference_url = CONF.worker_self_reference_url return self.db_api.get_cached_images( self.context, node_reference_url) def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return os.path.exists(self.get_image_filepath(image_id)) def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ # Make sure we're not already cached or caching the image return not (self.is_cached(image_id) or self.is_being_cached(image_id)) def is_being_cached(self, image_id): """ Returns True if the image with supplied id is currently in the process of having its image file cached. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'incomplete') return os.path.exists(path) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') return os.path.exists(path) def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images """ deleted = 0 for path in self.get_cache_files(self.base_dir): delete_cached_file(path) deleted += 1 node_reference_url = CONF.worker_self_reference_url self.db_api.delete_all_cached_images( self.context, node_reference_url) return deleted def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ node_reference_url = CONF.worker_self_reference_url path = self.get_image_filepath(image_id) delete_cached_file(path) self.db_api.delete_cached_image( self.context, image_id, node_reference_url) def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images """ files_deleted = 0 for file in self.get_cache_files(self.queue_dir): fileutils.delete_if_exists(file) files_deleted += 1 return files_deleted def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') fileutils.delete_if_exists(path) def clean(self, stall_time=None): """ Delete any image files in the invalid directory and any files in the incomplete directory that are older than a configurable amount of time. """ self.delete_invalid_files() if stall_time is None: stall_time = CONF.image_cache_stall_time now = time.time() older_than = now - stall_time self.delete_stalled_files(older_than) def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ node_reference_url = CONF.worker_self_reference_url image_id = self.db_api.get_least_recently_accessed( self.context, node_reference_url) path = self.get_image_filepath(image_id) try: file_info = os.stat(path) size = file_info[stat.ST_SIZE] except OSError: size = 0 return image_id, size @contextmanager def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. 
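Data is first written to the ``incomplete`` directory; on success the file is moved into place and its details are recorded in the central database, while on failure the partial file is moved to the ``invalid`` directory. 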
:param image_id: Image ID """ incomplete_path = self.get_image_filepath(image_id, 'incomplete') node_reference_url = CONF.worker_self_reference_url def commit(): final_path = self.get_image_filepath(image_id) LOG.debug("Fetch finished, moving " "'%(incomplete_path)s' to '%(final_path)s'", dict(incomplete_path=incomplete_path, final_path=final_path)) os.rename(incomplete_path, final_path) # Make sure that we "pop" the image from the queue... if self.is_queued(image_id): fileutils.delete_if_exists( self.get_image_filepath(image_id, 'queue')) file_size = os.path.getsize(final_path) self.db_api.insert_cache_details( self.context, node_reference_url, image_id, file_size) LOG.debug("Image cached successfully.") def rollback(e): if os.path.exists(incomplete_path): invalid_path = self.get_image_filepath(image_id, 'invalid') msg = (_LW("Fetch of cache file failed (%(e)s), rolling " "back by moving '%(incomplete_path)s' to " "'%(invalid_path)s'"), {'e': e, 'incomplete_path': incomplete_path, 'invalid_path': invalid_path}) LOG.warning(msg) os.rename(incomplete_path, invalid_path) self.db_api.delete_cached_image( self.context, image_id, node_reference_url) try: with open(incomplete_path, 'wb') as cache_file: yield cache_file except Exception as e: with excutils.save_and_reraise_exception(): rollback(e) else: commit() finally: # if the generator filling the cache file neither raises an # exception, nor completes fetching all data, neither rollback # nor commit will have been called, so the incomplete file # will persist - in that case remove it as it is unusable # example: ^c from client fetch if os.path.exists(incomplete_path): rollback('incomplete fetch') @contextmanager def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) try: with open(path, 'rb') as cache_file: yield cache_file finally: node_reference_url = CONF.worker_self_reference_url self.db_api.update_hit_count( self.context, image_id, node_reference_url) def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ if self.is_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) return False if self.is_being_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already being " "written to cache"), image_id) return False if self.is_queued(image_id): LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id) return False path = self.get_image_filepath(image_id, 'queue') # Touch the file to add it to the queue with open(path, "w"): pass return True def delete_invalid_files(self): """ Removes any invalid cache entries """ for path in self.get_cache_files(self.invalid_dir): fileutils.delete_if_exists(path) LOG.info(_LI("Removed invalid cache file %s"), path) def delete_stalled_files(self, older_than): """ Removes any incomplete cache entries older than a supplied modified time. :param older_than: Files written to on or before this timestamp will be deleted. """ for path in self.get_cache_files(self.incomplete_dir): if os.path.getmtime(path) < older_than: try: fileutils.delete_if_exists(path) LOG.info(_LI("Removed stalled cache file %s"), path) except Exception as e: msg = (_LW("Failed to delete file %(path)s. 
" "Got error: %(e)s"), dict(path=path, e=e)) LOG.warning(msg) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ files = [f for f in self.get_cache_files(self.queue_dir)] items = [] for path in files: mtime = os.path.getmtime(path) items.append((mtime, os.path.basename(path))) items.sort() return [image_id for (modtime, image_id) in items] def get_cache_files(self, basepath): """ Returns cache files in the supplied directory :param basepath: Directory to look in for cache files """ for fname in os.listdir(basepath): path = os.path.join(basepath, fname) if os.path.isfile(path) and not path.endswith(".db"): yield path def delete_cached_file(path): LOG.debug("Deleting image cache file '%s'", path) fileutils.delete_if_exists(path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/drivers/common.py0000664000175000017500000000535300000000000022042 0ustar00zuulzuul00000000000000# Copyright 2023 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common code which will be used in SQLite and centralzed_db driver until SQLite driver is removed from glance. """ from contextlib import contextmanager import sqlite3 from eventlet import sleep from eventlet import timeout from oslo_log import log as logging from glance.i18n import _LE LOG = logging.getLogger(__name__) DEFAULT_SQL_CALL_TIMEOUT = 2 def dict_factory(cur, row): return {col[0]: row[idx] for idx, col in enumerate(cur.description)} class SqliteConnection(sqlite3.Connection): """ SQLite DB Connection handler that plays well with eventlet, slightly modified from Swift's similar code. """ def __init__(self, *args, **kwargs): self.timeout_seconds = kwargs.get('timeout', DEFAULT_SQL_CALL_TIMEOUT) kwargs['timeout'] = 0 sqlite3.Connection.__init__(self, *args, **kwargs) def _timeout(self, call): with timeout.Timeout(self.timeout_seconds): while True: try: return call() except sqlite3.OperationalError as e: if 'locked' not in str(e): raise sleep(0.05) def execute(self, *args, **kwargs): return self._timeout(lambda: sqlite3.Connection.execute( self, *args, **kwargs)) def commit(self): return self._timeout(lambda: sqlite3.Connection.commit(self)) @contextmanager def get_db(db_path): """ Returns a context manager that produces a database connection that self-closes and calls rollback if an error occurs while using the database connection """ conn = sqlite3.connect(db_path, check_same_thread=False, factory=SqliteConnection) conn.row_factory = sqlite3.Row conn.text_factory = str conn.execute('PRAGMA synchronous = NORMAL') conn.execute('PRAGMA count_changes = OFF') conn.execute('PRAGMA temp_store = MEMORY') try: yield conn except sqlite3.DatabaseError as e: msg = _LE("Error executing SQLite call. 
Got error: %s") % e LOG.error(msg) conn.rollback() finally: conn.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/drivers/sqlite.py0000664000175000017500000003673600000000000022064 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cache driver that uses SQLite to store information about cached images """ from contextlib import contextmanager import os import sqlite3 import stat import time from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import fileutils from glance.common import exception from glance.i18n import _, _LI, _LW from glance.image_cache.drivers import base from glance.image_cache.drivers import common LOG = logging.getLogger(__name__) DEPRECATION_REASON = """ As centralized database will now be used for image cache management, the use of `sqlite` database and driver will be dropped from 'E' (2025.1) development cycle. """ sqlite_opts = [ cfg.StrOpt('image_cache_sqlite_db', default='cache.db', deprecated_for_removal=True, deprecated_reason=DEPRECATION_REASON, deprecated_since='Caracal (2024.1)', help=_(""" The relative path to sqlite file database that will be used for image cache management. This is a relative path to the sqlite file database that tracks the age and usage statistics of image cache. The path is relative to image cache base directory, specified by the configuration option ``image_cache_dir``. This is a lightweight database with just one table. Possible values: * A valid relative path to sqlite file database Related options: * ``image_cache_dir`` """)), ] CONF = cfg.CONF CONF.register_opts(sqlite_opts) class Driver(base.Driver): """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. """ def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ LOG.warning(_(DEPRECATION_REASON)) super(Driver, self).configure() # Create the SQLite database that will hold our cache attributes self.initialize_db() def initialize_db(self): db = CONF.image_cache_sqlite_db self.db_path = os.path.join(self.base_dir, db) lockutils.set_defaults(self.base_dir) @lockutils.synchronized('image_cache_db_init', external=True) def create_db(): try: conn = sqlite3.connect(self.db_path, check_same_thread=False, factory=common.SqliteConnection) conn.executescript(""" CREATE TABLE IF NOT EXISTS cached_images ( image_id TEXT PRIMARY KEY, last_accessed REAL DEFAULT 0.0, last_modified REAL DEFAULT 0.0, size INTEGER DEFAULT 0, hits INTEGER DEFAULT 0, checksum TEXT ); """) conn.close() except sqlite3.DatabaseError as e: msg = _("Failed to initialize the image cache database. 
" "Got error: %s") % e LOG.error(msg) raise exception.BadDriverConfiguration(driver_name='sqlite', reason=msg) create_db() def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ sizes = [] for path in self.get_cache_files(self.base_dir): if path == self.db_path: continue file_info = os.stat(path) sizes.append(file_info[stat.ST_SIZE]) return sum(sizes) def get_hit_count(self, image_id): """ Return the number of hits that an image has. :param image_id: Opaque image identifier """ if not self.is_cached(image_id): return 0 hits = 0 with common.get_db(self.db_path) as db: cur = db.execute("""SELECT hits FROM cached_images WHERE image_id = ?""", (image_id,)) hits = cur.fetchone()[0] return hits def get_cached_images(self): """ Returns a list of records about cached images. """ LOG.debug("Gathering cached image entries.") with common.get_db(self.db_path) as db: cur = db.execute("""SELECT image_id, hits, last_accessed, last_modified, size FROM cached_images ORDER BY image_id""") cur.row_factory = common.dict_factory return [r for r in cur] def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return os.path.exists(self.get_image_filepath(image_id)) def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ # Make sure we're not already cached or caching the image return not (self.is_cached(image_id) or self.is_being_cached(image_id)) def is_being_cached(self, image_id): """ Returns True if the image with supplied id is currently in the process of having its image file cached. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'incomplete') return os.path.exists(path) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') return os.path.exists(path) def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images """ deleted = 0 with common.get_db(self.db_path) as db: for path in self.get_cache_files(self.base_dir): delete_cached_file(path) deleted += 1 db.execute("""DELETE FROM cached_images""") db.commit() return deleted def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id) with common.get_db(self.db_path) as db: delete_cached_file(path) db.execute("""DELETE FROM cached_images WHERE image_id = ?""", (image_id, )) db.commit() def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images """ files = [f for f in self.get_cache_files(self.queue_dir)] for file in files: fileutils.delete_if_exists(file) return len(files) def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') fileutils.delete_if_exists(path) def clean(self, stall_time=None): """ Delete any image files in the invalid directory and any files in the incomplete directory that are older than a configurable amount of time. 
""" self.delete_invalid_files() if stall_time is None: stall_time = CONF.image_cache_stall_time now = time.time() older_than = now - stall_time self.delete_stalled_files(older_than) def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ with common.get_db(self.db_path) as db: cur = db.execute("""SELECT image_id FROM cached_images ORDER BY last_accessed LIMIT 1""") try: image_id = cur.fetchone()[0] except TypeError: # There are no more cached images return None path = self.get_image_filepath(image_id) try: file_info = os.stat(path) size = file_info[stat.ST_SIZE] except OSError: size = 0 return image_id, size @contextmanager def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. :param image_id: Image ID """ incomplete_path = self.get_image_filepath(image_id, 'incomplete') def commit(): with common.get_db(self.db_path) as db: final_path = self.get_image_filepath(image_id) LOG.debug("Fetch finished, moving " "'%(incomplete_path)s' to '%(final_path)s'", dict(incomplete_path=incomplete_path, final_path=final_path)) os.rename(incomplete_path, final_path) # Make sure that we "pop" the image from the queue... if self.is_queued(image_id): fileutils.delete_if_exists( self.get_image_filepath(image_id, 'queue')) filesize = os.path.getsize(final_path) now = time.time() db.execute("""INSERT INTO cached_images (image_id, last_accessed, last_modified, hits, size) VALUES (?, ?, ?, 0, ?)""", (image_id, now, now, filesize)) db.commit() def rollback(e): with common.get_db(self.db_path) as db: if os.path.exists(incomplete_path): invalid_path = self.get_image_filepath(image_id, 'invalid') msg = (_LW("Fetch of cache file failed (%(e)s), rolling " "back by moving '%(incomplete_path)s' to " "'%(invalid_path)s'"), {'e': e, 'incomplete_path': incomplete_path, 'invalid_path': invalid_path}) LOG.warning(msg) os.rename(incomplete_path, invalid_path) db.execute("""DELETE FROM cached_images WHERE image_id = ?""", (image_id, )) db.commit() try: with open(incomplete_path, 'wb') as cache_file: yield cache_file except Exception as e: with excutils.save_and_reraise_exception(): rollback(e) else: commit() finally: # if the generator filling the cache file neither raises an # exception, nor completes fetching all data, neither rollback # nor commit will have been called, so the incomplete file # will persist - in that case remove it as it is unusable # example: ^c from client fetch if os.path.exists(incomplete_path): rollback('incomplete fetch') @contextmanager def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) with open(path, 'rb') as cache_file: yield cache_file now = time.time() with common.get_db(self.db_path) as db: db.execute("""UPDATE cached_images SET hits = hits + 1, last_accessed = ? WHERE image_id = ?""", (now, image_id)) db.commit() def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ if self.is_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) return False if self.is_being_cached(image_id): LOG.info(_LI("Not queueing image '%s'. 
Already being " "written to cache"), image_id) return False if self.is_queued(image_id): LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id) return False path = self.get_image_filepath(image_id, 'queue') # Touch the file to add it to the queue with open(path, "w"): pass return True def delete_invalid_files(self): """ Removes any invalid cache entries """ for path in self.get_cache_files(self.invalid_dir): fileutils.delete_if_exists(path) LOG.info(_LI("Removed invalid cache file %s"), path) def delete_stalled_files(self, older_than): """ Removes any incomplete cache entries older than a supplied modified time. :param older_than: Files written to on or before this timestamp will be deleted. """ for path in self.get_cache_files(self.incomplete_dir): if os.path.getmtime(path) < older_than: try: fileutils.delete_if_exists(path) LOG.info(_LI("Removed stalled cache file %s"), path) except Exception as e: msg = (_LW("Failed to delete file %(path)s. " "Got error: %(e)s"), dict(path=path, e=e)) LOG.warning(msg) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ files = [f for f in self.get_cache_files(self.queue_dir)] items = [] for path in files: mtime = os.path.getmtime(path) items.append((mtime, os.path.basename(path))) items.sort() return [image_id for (modtime, image_id) in items] def get_cache_files(self, basepath): """ Returns cache files in the supplied directory :param basepath: Directory to look in for cache files """ for fname in os.listdir(basepath): path = os.path.join(basepath, fname) if path != self.db_path and os.path.isfile(path): yield path def delete_cached_file(path): LOG.debug("Deleting image cache file '%s'", path) fileutils.delete_if_exists(path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/drivers/xattr.py0000664000175000017500000004047200000000000021715 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. Assumptions =========== 1. Cache data directory exists on a filesystem that updates atime on reads ('noatime' should NOT be set) 2. Cache data directory exists on a filesystem that supports xattrs. This is optional, but highly recommended since it allows us to present ops with useful information pertaining to the cache, like human readable filenames and statistics. 3. `glance-prune` is scheduled to run as a periodic job via cron. This is needed to run the LRU prune strategy to keep the cache size within the limits set by the config file. Cache Directory Notes ===================== The image cache data directory contains the main cache path, where the active cache entries and subdirectories for handling partial downloads and errored-out cache images. 
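A quick way to check assumption 2 for a candidate cache directory is to
probe it with a throwaway file (an illustrative snippet only; the probe
path is an assumption, not part of this module):

    import os
    import xattr

    probe = '/var/lib/glance/image-cache/.xattr-probe'
    open(probe, 'wb').close()
    try:
        xattr.setxattr(probe, 'user.test', b'1')
        print('xattr supported')
    except OSError:
        print('xattr NOT supported; add user_xattr in fstab')
    finally:
        os.remove(probe)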
The layout looks like: $image_cache_dir/ entry1 entry2 ... incomplete/ invalid/ queue/ """ from contextlib import contextmanager import errno import os import stat import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import fileutils import xattr from glance.common import exception from glance.i18n import _, _LI from glance.image_cache.drivers import base LOG = logging.getLogger(__name__) CONF = cfg.CONF class Driver(base.Driver): """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. """ def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ # Here we set up the various file-based image cache paths # that we need in order to find the files in different states # of cache management. self.set_paths() # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs image_cache_dir = self.base_dir fake_image_filepath = os.path.join(image_cache_dir, 'checkme') with open(fake_image_filepath, 'wb') as fake_file: fake_file.write(b"XXX") fake_file.flush() try: set_xattr(fake_image_filepath, 'hits', '1') except IOError as e: if e.errno == errno.EOPNOTSUPP: msg = (_("The device housing the image cache directory " "%(image_cache_dir)s does not support xattr. It is" " likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the" " device housing the cache directory.") % {'image_cache_dir': image_cache_dir}) LOG.error(msg) raise exception.BadDriverConfiguration(driver_name="xattr", reason=msg) else: # Cleanup after ourselves... fileutils.delete_if_exists(fake_image_filepath) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ sizes = [] for path in get_all_regular_files(self.base_dir): file_info = os.stat(path) sizes.append(file_info[stat.ST_SIZE]) return sum(sizes) def get_hit_count(self, image_id): """ Return the number of hits that an image has. :param image_id: Opaque image identifier """ if not self.is_cached(image_id): return 0 path = self.get_image_filepath(image_id) return int(get_xattr(path, 'hits', default=0)) def get_cached_images(self): """ Returns a list of records about cached images. """ LOG.debug("Gathering cached image entries.") entries = [] for path in get_all_regular_files(self.base_dir): image_id = os.path.basename(path) entry = {'image_id': image_id} file_info = os.stat(path) entry['last_modified'] = file_info[stat.ST_MTIME] entry['last_accessed'] = file_info[stat.ST_ATIME] entry['size'] = file_info[stat.ST_SIZE] entry['hits'] = self.get_hit_count(image_id) entries.append(entry) return entries def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return os.path.exists(self.get_image_filepath(image_id)) def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. 
:param image_id: Image ID """ # Make sure we're not already cached or caching the image return not (self.is_cached(image_id) or self.is_being_cached(image_id)) def is_being_cached(self, image_id): """ Returns True if the image with supplied id is currently in the process of having its image file cached. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'incomplete') return os.path.exists(path) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. """ path = self.get_image_filepath(image_id, 'queue') return os.path.exists(path) def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images """ deleted = 0 for path in get_all_regular_files(self.base_dir): delete_cached_file(path) deleted += 1 return deleted def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id) delete_cached_file(path) def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images """ files = [f for f in get_all_regular_files(self.queue_dir)] for file in files: fileutils.delete_if_exists(file) return len(files) def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') fileutils.delete_if_exists(path) def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ stats = [] for path in get_all_regular_files(self.base_dir): file_info = os.stat(path) stats.append((file_info[stat.ST_ATIME], # access time file_info[stat.ST_SIZE], # size in bytes path)) # absolute path if not stats: return None stats.sort() return os.path.basename(stats[0][2]), stats[0][1] @contextmanager def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. :param image_id: Image ID """ incomplete_path = self.get_image_filepath(image_id, 'incomplete') def set_attr(key, value): set_xattr(incomplete_path, key, value) def commit(): set_attr('hits', 0) final_path = self.get_image_filepath(image_id) LOG.debug("Fetch finished, moving " "'%(incomplete_path)s' to '%(final_path)s'", dict(incomplete_path=incomplete_path, final_path=final_path)) os.rename(incomplete_path, final_path) # Make sure that we "pop" the image from the queue... 
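            # (The queue entry is just the zero-byte marker file that
            # queue_image() touches; once the image bytes have been
            # renamed into the main cache directory the marker is stale,
            # so it is removed here rather than left for the cleaner.)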
if self.is_queued(image_id): LOG.debug("Removing image '%s' from queue after " "caching it.", image_id) fileutils.delete_if_exists( self.get_image_filepath(image_id, 'queue')) def rollback(e): set_attr('error', encodeutils.exception_to_unicode(e)) invalid_path = self.get_image_filepath(image_id, 'invalid') LOG.debug("Fetch of cache file failed (%(e)s), rolling back by " "moving '%(incomplete_path)s' to " "'%(invalid_path)s'", {'e': encodeutils.exception_to_unicode(e), 'incomplete_path': incomplete_path, 'invalid_path': invalid_path}) os.rename(incomplete_path, invalid_path) try: with open(incomplete_path, 'wb') as cache_file: yield cache_file except Exception as e: with excutils.save_and_reraise_exception(): rollback(e) else: commit() finally: # if the generator filling the cache file neither raises an # exception, nor completes fetching all data, neither rollback # nor commit will have been called, so the incomplete file # will persist - in that case remove it as it is unusable # example: ^c from client fetch if os.path.exists(incomplete_path): rollback('incomplete fetch') @contextmanager def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) with open(path, 'rb') as cache_file: yield cache_file path = self.get_image_filepath(image_id) inc_xattr(path, 'hits', 1) def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ if self.is_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) return False if self.is_being_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already being " "written to cache"), image_id) return False if self.is_queued(image_id): LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id) return False path = self.get_image_filepath(image_id, 'queue') LOG.debug("Queueing image '%s'.", image_id) # Touch the file to add it to the queue with open(path, "w"): pass return True def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ files = [f for f in get_all_regular_files(self.queue_dir)] items = [] for path in files: mtime = os.path.getmtime(path) items.append((mtime, os.path.basename(path))) items.sort() return [image_id for (modtime, image_id) in items] def _reap_old_files(self, dirpath, entry_type, grace=None): now = time.time() reaped = 0 for path in get_all_regular_files(dirpath): mtime = os.path.getmtime(path) age = now - mtime if not grace: LOG.debug("No grace period, reaping '%(path)s'" " immediately", {'path': path}) delete_cached_file(path) reaped += 1 elif age > grace: LOG.debug("Cache entry '%(path)s' exceeds grace period, " "(%(age)i s > %(grace)i s)", {'path': path, 'age': age, 'grace': grace}) delete_cached_file(path) reaped += 1 LOG.info(_LI("Reaped %(reaped)s %(entry_type)s cache entries"), {'reaped': reaped, 'entry_type': entry_type}) return reaped def reap_invalid(self, grace=None): """Remove any invalid cache entries :param grace: Number of seconds to keep an invalid entry around for debugging purposes. If None, then delete immediately. 
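                      For example, ``reap_invalid(grace=3600)`` keeps
                      invalid entries around for up to an hour for
                      post-mortem inspection, while ``reap_invalid()``
                      deletes them all immediately.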
""" return self._reap_old_files(self.invalid_dir, 'invalid', grace=grace) def reap_stalled(self, grace=None): """Remove any stalled cache entries :param grace: Number of seconds to keep an invalid entry around for debugging purposes. If None, then delete immediately. """ return self._reap_old_files(self.incomplete_dir, 'stalled', grace=grace) def clean(self, stall_time=None): """ Delete any image files in the invalid directory and any files in the incomplete directory that are older than a configurable amount of time. """ self.reap_invalid() if stall_time is None: stall_time = CONF.image_cache_stall_time self.reap_stalled(stall_time) def get_all_regular_files(basepath): for fname in os.listdir(basepath): path = os.path.join(basepath, fname) if os.path.isfile(path): yield path def delete_cached_file(path): LOG.debug("Deleting image cache file '%s'", path) fileutils.delete_if_exists(path) def _make_namespaced_xattr_key(key, namespace='user'): """ Create a fully-qualified xattr-key by including the intended namespace. Namespacing differs among OSes[1]: FreeBSD: user, system Linux: user, system, trusted, security MacOS X: not needed Mac OS X won't break if we include a namespace qualifier, so, for simplicity, we always include it. -- [1] http://en.wikipedia.org/wiki/Extended_file_attributes """ namespaced_key = ".".join([namespace, key]) return namespaced_key def get_xattr(path, key, **kwargs): """Return the value for a particular xattr If the key doesn't not exist, or xattrs aren't supported by the file system then a KeyError will be raised, that is, unless you specify a default using kwargs. """ namespaced_key = _make_namespaced_xattr_key(key) try: return xattr.getxattr(path, namespaced_key) except IOError: if 'default' in kwargs: return kwargs['default'] else: raise def set_xattr(path, key, value): """Set the value of a specified xattr. If xattrs aren't supported by the file-system, we skip setting the value. """ namespaced_key = _make_namespaced_xattr_key(key) if not isinstance(value, bytes): value = str(value).encode('utf-8') xattr.setxattr(path, namespaced_key, value) def inc_xattr(path, key, n=1): """ Increment the value of an xattr (assuming it is an integer). BEWARE, this code *does* have a RACE CONDITION, since the read/update/write sequence is not atomic. Since the use-case for this function is collecting stats--not critical-- the benefits of simple, lock-free code out-weighs the possibility of an occasional hit not being counted. """ count = int(get_xattr(path, key)) count += n set_xattr(path, key, str(count)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/prefetcher.py0000664000175000017500000000657700000000000021234 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Prefetches images into the Image Cache """ import glance_store from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from glance.api import common as api_common from glance.common import exception from glance import context from glance.i18n import _LI, _LW from glance.image_cache import base CONF = cfg.CONF LOG = logging.getLogger(__name__) class Prefetcher(base.CacheApp): def __init__(self): # NOTE(abhishekk): Importing the glance.gateway just in time to avoid # import loop during initialization import glance.gateway # noqa super(Prefetcher, self).__init__() self.gateway = glance.gateway.Gateway() def fetch_image_into_cache(self, image_id): ctx = context.RequestContext(is_admin=True, show_deleted=True, roles=['admin']) try: image_repo = self.gateway.get_repo(ctx) image = image_repo.get(image_id) except exception.NotFound: LOG.warning(_LW("Image '%s' not found"), image_id) return False if image.status != 'active': LOG.warning(_LW("Image '%s' is not active. Not caching."), image_id) return False for loc in image.locations: if CONF.enabled_backends: image_data, image_size = glance_store.get(loc['url'], None, context=ctx) else: image_data, image_size = glance_store.get_from_backend( loc['url'], context=ctx) LOG.debug("Caching image '%s'", image_id) cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data, image.checksum) # Image is tee'd into cache and checksum verified # as we iterate list(cache_tee_iter) return True @lockutils.lock('glance-cache', external=True) def run(self): images = self.cache.get_queued_images() if not images: LOG.debug("Nothing to prefetch.") return True num_images = len(images) LOG.debug("Found %d images to prefetch", num_images) pool = api_common.get_thread_pool('prefetcher', size=num_images) results = pool.map(self.fetch_image_into_cache, images) successes = sum([1 for r in results if r is True]) if successes != num_images: LOG.warning(_LW("Failed to successfully cache all " "images in queue.")) return False LOG.info(_LI("Successfully cached all %d images"), num_images) return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/image_cache/pruner.py0000664000175000017500000000141600000000000020403 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Prunes the Image Cache """ from glance.image_cache import base class Pruner(base.CacheApp): def run(self): self.cache.prune() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.782294 glance-29.0.0/glance/locale/0000775000175000017500000000000000000000000015546 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/de/0000775000175000017500000000000000000000000016136 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000017723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/de/LC_MESSAGES/glance.po0000664000175000017500000016242400000000000021525 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Carsten Duch , 2014 # Ettore Atalan , 2014 # Laera Loris , 2013 # Robert Simai, 2014 # Andreas Jaeger , 2016. #zanata # Andreas Jaeger , 2019. #zanata # Robert Holling , 2022. #zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-03-14 06:07+0000\n" "Last-Translator: Robert Holling \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: German\n" #, python-format msgid "\t%s" msgstr "\t%s" msgid "" "\n" "Limit the request ID length.\n" "\n" "Provide an integer value to limit the length of the request ID to\n" "the specified length. The default value is 64. Users can change this\n" "to any ineteger value between 0 and 16384 however keeping in mind that\n" "a larger value may flood the logs.\n" "\n" "Possible values:\n" " * Integer value between 0 and 16384\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Begrenzen Sie die Länge der Anforderungs-ID.\n" "\n" "Geben Sie einen ganzzahligen Wert an, um die Länge der Anforderungs-ID \n" "auf die angegebene Länge. Der Standardwert ist 64. Benutzer können dies " "ändern \n" "auf jeden Integer-Wert zwischen 0 und 16384, wobei zu beachten ist, \n" "dass ein größerer Wert die Protokolle überfluten kann.\n" "\n" "Mögliche Werte:\n" " * Ganzzahliger Wert zwischen 0 und 16384\n" "\n" "Verwandte Optionen:\n" " * Keine\n" "\n" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Eine %(cls)s-Ausnahme ist im letzten RPC-Aufruf aufgetreten: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s in der Mitgliedsliste des Abbild %(i_id)s nicht gefunden." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) läuft..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s scheint bereits aktiv zu sein: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Could not load the " "filesystem store" msgstr "" "%(task_id)s von %(task_type)s sind nicht ordnungsgemäß konfiguriert. Laden " "des Dateisystemspeichers nicht möglich" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s von %(task_type)s sind nicht ordnungsgemäß konfiguriert. " "Fehlendes Arbeitsverzeichnis: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(serv)s mit %(conf)s %(verb)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Geben Sie ein Host:Port-Paar an, wobei 'Host' eine IPv4-Adresse, eine " "IPv6-Adresse, ein Hostname oder ein vollständig qualifizierter Domänenname " "ist. Bei Verwendung einer IPv6-Adresse schließen Sie diese in Klammern ein, " "damit sie vom Port getrennt ist (d. h. \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s darf keine 4-Byte-Unicode-Zeichen enthalten." #, python-format msgid "%s is already stopped" msgstr "%s ist bereits gestoppt" #, python-format msgid "%s is stopped" msgstr "%s ist gestoppt" #, python-format msgid "%s of uploaded data is different from current value set on the image." msgstr "" "%s der hoch geladenen Daten unterscheidet sich vom aktuellen Wert, der in " "dem Abbild festgelegt ist." msgid "'container_format' needs to be set before import" msgstr "'container_format' muss vor dem Import gesetzt werden" msgid "'disk_format' needs to be set before import" msgstr "'disk_format' muss vor dem Import gesetzt werden" msgid "'glance-direct' method is not available at this site." msgstr "Die 'glance-direct'-Methode ist auf dieser Site nicht verfügbar." msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Option --os_auth_url oder Umgebungsvariable OS_AUTH_URL erforderlich, wenn " "die Keystone-Authentifizierungsstrategie aktiviert ist\n" msgid "A body is not expected with this request." msgstr "Es wird kein Body bei dieser Anforderung erwartet. " #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ein Metadatendefinitionsobjekt namens %(object_name)s ist bereits in " "Namensbereich %(namespace_name)s nicht gefunden." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Eine Metadatendefinitionseigenschaft namens %(property_name)s ist bereits in " "Namensbereich %(namespace_name)s vorhanden. " #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Ein Ressourcentyp %(resource_type_name)s der Metadatendefinition ist bereits " "vorhanden. " msgid "A set of URLs to access the image file kept in external store" msgstr "URLs für den Zugriff auf die Abbilddatei im externen Speicher" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Menge an Plattenspeicher (in GB), die zum Booten des Abbildes erforderlich " "ist." msgid "Amount of ram (in MB) required to boot image." 
msgstr "" "Menge an Arbeitsspeicher (in MB), die zum Booten des Abbildes erforderlich " "ist." msgid "An identifier for the image" msgstr "Eine ID für das Abbild" msgid "An identifier for the image member (tenantId)" msgstr "Eine ID für das Abbildelement (tenantId)" msgid "An identifier for the owner of this task" msgstr "Eine ID für den Eigentümer diesen Tasks" msgid "An identifier for the task" msgstr "Eine ID für die Task" msgid "An image file url" msgstr "URL der Abbilddatei" msgid "An image schema url" msgstr "URL des Abbildschemas" msgid "An image self url" msgstr "'self'-URL für Abbild" msgid "An import task exception occurred" msgstr "Es ist eine Ausnahme bei einer Importtask eingetreten." msgid "An object with the same identifier already exists." msgstr "Ein Objekt mit der gleichen ID ist bereits vorhanden." msgid "An object with the same identifier is currently being operated on." msgstr "An einem Objekt mit dieser ID wird derzeit eine Operation ausgeführt. " msgid "An object with the specified identifier was not found." msgstr "Ein Objekt mit der angegebenen ID wurde nicht gefunden." msgid "An unknown exception occurred" msgstr "Eine unbekannte Ausnahme ist aufgetreten" msgid "An unknown task exception occurred" msgstr "Eine unbekannte Taskausnahme ist aufgetreten" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Attribut '%(property)s' ist schreibgeschützt." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "Attribut '%(property)s' ist reserviert." #, python-format msgid "Attribute '%s' is read-only." msgstr "Attribut '%s' ist schreibgeschützt." #, python-format msgid "Attribute '%s' is reserved." msgstr "Attribut '%s' ist reserviert." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "Attribut 'container_format' kann nur durch ein Abbild in der Warteschlange " "ersetzt werden. " msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "Attribut 'disk_format' kann nur durch ein Abbild in der Warteschlange " "ersetzt werden. " #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Authentifizierungsservice unter URL %(url)s nicht gefunden." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Authentifizierungsfehler: Das Token ist möglicherweise beim Hochladen der " "Datei abgelaufen. Die Abbilddaten für %s werden gelöscht." msgid "Authorization failed." msgstr "Authorisierung fehlgeschlagen." msgid "Available categories:" msgstr "Verfügbare Kategorien:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Falsches \"%s\"-Abfragefilterformat. Verwenden Sie die ISO 8601 DateTime-" "Notation." #, python-format msgid "Bad header: %(header_name)s" msgstr "Fehlerhafter Header: %(header_name)s" msgid "Body expected in request." msgstr "Text in Anforderung erwartet." msgid "Cannot be a negative value" msgstr "Darf kein negativer Wert sein" msgid "Cannot be a negative value." msgstr "Darf kein negativer Wert sein." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "" "Abbild %(key)s '%(value)s' kann nicht in eine Ganzzahl konvertiert werden. " msgid "" "Cannot delete image data from the only store containing it. Consider " "deleting the image instead." msgstr "" "Abbild Daten können nicht aus dem einzigen Speicher gelöscht werden, der sie " "enthält. Erwägen Sie stattdessen, das Abbild zu löschen." 
#, python-format msgid "Cannot delete staged image data %(fn)s [Errno %(en)d]" msgstr "" "Bereitgestellte Abbilddaten können nicht gelöscht werden %(fn)s [Errno " "%(en)d]" msgid "Cannot remove last location in the image." msgstr "Die letzte Position im Abbild kann nicht entfernt werden. " #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "" "Daten für Abbild %(image_id)s können nicht gespeichert werden: %(error)s" msgid "Cannot set locations to empty list." msgstr "Positionen können nicht auf leere Liste gesetzt werden. " #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Verifizierung von Kontrollsumme fehlgeschlagen. Zwischenspeichern von Image " "'%s' abgebrochen." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Verbindungsfehler/fehlerhafte Anforderung an Authentifizierungsservice unter " "URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "Erstellte URL: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Fehlerhafter Abbild-Download für Abbild %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Keine Bindung an %(host)s:%(port)s möglich nach Versuch über 30 Sekunden" msgid "Could not find OVF file in OVA archive file." msgstr "Es wurde keine OVF-Datei in der OVA-Archivdatei gefunden. " #, python-format msgid "Could not find metadata object %s" msgstr "Metadatenobjekt %s konnte nicht gefunden werden" #, python-format msgid "Could not find metadata tag %s" msgstr "Metadatenschlagwort %s konnte nicht gefunden werden" #, python-format msgid "Could not find property %s" msgstr "Eigenschaft %s konnte nicht gefunden werden" #, python-format msgid "Could not find task %s" msgstr "Task %s konnte nicht gefunden werden" #, python-format msgid "Could not update image: %s" msgstr "Abbild konnte nicht aktualisiert werden: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "Zurzeit werden OVA-Pakete mit mehreren Platten nicht unterstützt. " msgid "Data supplied was not valid." msgstr "Angegebene Daten waren nicht gültig." 
msgid "Date and time of image member creation" msgstr "Datum und Uhrzeit der Erstellung des Abbildelements" msgid "Date and time of image registration" msgstr "Datum und Uhrzeit der Abbildregistrierung " msgid "Date and time of last modification of image member" msgstr "Datum und Uhrzeit der letzten Änderung des Abbildelements" msgid "Date and time of namespace creation" msgstr "Datum und Uhrzeit der Erstellung des Namensbereichs" msgid "Date and time of object creation" msgstr "Datum und Uhrzeit der Objekterstellung" msgid "Date and time of resource type association" msgstr "Datum und Uhrzeit der Ressourcentypzuordnung" msgid "Date and time of tag creation" msgstr "Datum und Uhrzeit der Erstellung des Schlagwortes" msgid "Date and time of the last image modification" msgstr "Datum und Uhrzeit der letzten Abbildänderung" msgid "Date and time of the last namespace modification" msgstr "Datum und Uhrzeit der letzten Änderung des Namensbereichs" msgid "Date and time of the last object modification" msgstr "Datum und Uhrzeit der letzten Objektänderung" msgid "Date and time of the last resource type association modification" msgstr "Datum und Uhrzeit der letzten Änderung der Ressourcentypzuordnung" msgid "Date and time of the last tag modification" msgstr "Datum und Uhrzeit der letzten Schlagwortänderung" msgid "Datetime when this resource was created" msgstr "Datum/Uhrzeit der Erstellung dieser Ressource" msgid "Datetime when this resource was updated" msgstr "Datum/Uhrzeit der Aktualisierung dieser Ressource" msgid "Datetime when this resource would be subject to removal" msgstr "Datum/Uhrzeit, zu dem/der diese Ressource entfernt werden würde" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "Versuch, das Abbild hochzuladen, wird verweigert, weil es das Kontingent " "überschreitet: %s" msgid "Descriptive name for the image" msgstr "Beschreibender Name für das Abbild" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Treiber %(driver_name)s konnte nicht ordnungsgemäß konfiguriert werden. " "Grund: %(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Fehler beim Entschlüsseln Ihrer Anforderung. Entweder die URL oder der " "angeforderte Body enthalten Zeichen, die von Glance nicht entschlüsselt " "werden konnten. " #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" "Fehler beim Abrufen der Mitglieder von Abbild %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Fehler in Speicherkonfiguration. Hinzufügen von Abbildern zu Speicher ist " "inaktiviert." 
#, python-format msgid "Error: %(exc_type)s: %(e)s" msgstr "Fehler: %(exc_type)s: %(e)s" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "" "Mitglied mit Angabe im folgenden Format erwartet: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "" "Status mit Angabe im folgenden Format erwartet: {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Zu löschendes Abbild %(image_id)s wurde nicht gefunden" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Zu löschender Ressourcentyp %(resourcetype)s wurde nicht gefunden" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Die Image-Zwischenspeicherdatenbank wurde nicht initialisiert. Fehler: %s" #, python-format msgid "Failed to read %s from config" msgstr "Fehler beim Lesen von %s aus Konfiguration" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Fehler beim Hochladen von Abbilddaten für Abbild %(image_id)s wegen HTTP-" "Fehler: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Fehler beim Hochladen der Abbilddaten für das Abbild %(image_id)s auf Grund " "eines internen Fehlers: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "Datei %(path)s hat ungültige Sicherungsdatei %(bfile)s. Abbruch." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Dateibasierte Importe sind nicht zulässig. Verwenden Sie eine " "Imagedatenquelle, die nicht lokal ist." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Anforderung wird verboten, Metadatendefinitionsnamensbereich %s ist nicht " "sichtbar. " #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Anforderung wird nicht zugelassen, Task %s ist nicht sichtbar" msgid "Format of the container" msgstr "Format des Containers" msgid "Format of the disk" msgstr "Format der Festplatte" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" ist nicht gültig." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host und Port \"%s\" ist nicht gültig." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Informationsnachricht in Klarschrift nur eingeschlossen, wenn zweckdienlich " "(in der Regel bei einem Fehler)" msgid "" "If provided 'x-image-cache-clear-target' must be 'cache', 'queue' or empty " "string." msgstr "" "Falls angegeben, muss 'x-image-cache-clear-target' 'cache', 'queue' oder " "eine leere Zeichenfolge sein." msgid "If true, image will not be deletable." msgstr "Bei 'true' kann das Abbild nicht gelöscht werden." msgid "If true, namespace will not be deletable." msgstr "Bei 'true' kann der Namensbereich nicht gelöscht werden." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" "Abbild %(id)s konnte nicht gelöscht werden, da es verwendet wird: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Abbild %(image_id)s wurde nach dem Upload nicht gefunden. 
Das Abbild wurde " "möglicherweise während des Uploads gelöscht: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "Abbild %(image_id)s ist geschützt und kann nicht gelöscht werden." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Abbild %s konnte nach dem Upload nicht gefunden werden. Das Abbild wurde " "möglicherweise beim Upload gelöscht. Die hochgeladenen Blöcke werden " "bereinigt." #, python-format msgid "Image %s not found." msgstr "Abbild %s nicht gefunden." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "Das Abbild übersteigt das vorhandene Speicherkontingent: %s" msgid "Image id is required." msgstr "Abbild-ID ist erforderlich." #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "Grenzwert für Abbildmitglieder für Abbild %(id)s überschritten: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Abbild-Statusänderung von %(cur_status)s nach %(new_status)s ist nicht " "erlaubt" #, python-format msgid "Image storage media is full: %s" msgstr "Datenträger zum Speichern des Abbildes ist voll: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Grenzwert für Abbildschlagwort für Abbild %(id)s überschritten: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problem beim Abbildupload: %s" msgid "Image with status active cannot be target for import" msgstr "" "Abbild mit dem Status aktiv kann nicht als Ziel für den Import verwendet " "werden" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Abbild mit der angegebenen ID %(image_id)s wurde nicht gefunden" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Falsche Authentifizierungsstrategie. Erwartet wurde \"%(expected)s\", " "empfangen wurde jedoch \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Falsche Anforderung: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Eingabe enthält nicht das Feld '%(key)s' " #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Nicht ausreichende Berechtigungen auf Abbildspeichermedien: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Ungültiger JSON Zeiger für diese Ressource: : '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "Ungültige Konfiguration in der Glance-Swift-Konfigurationsdatei." msgid "Invalid configuration in property protection file." msgstr "Ungültige Konfiguration in Eigenschaftsschutzdatei. " #, python-format msgid "Invalid content type %(content_type)s" msgstr "Ungültiger Inhaltstyp %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Ungültiger Filterwert %s. Das schließende Anführungszeichen fehlt." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Ungültiger Filterwert %s. Vor dem schließenden Anführungszeichen ist kein " "Komma." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Ungültiger Filterwert %s. Vor dem öffnenden Anführungszeichen ist kein Komma." 
msgid "Invalid location" msgstr "Ungültige Position" #, python-format msgid "Invalid location: %s" msgstr "Ungültiger Ort: %s" msgid "Invalid locations" msgstr "Ungültige Positionen" #, python-format msgid "Invalid locations: %s" msgstr "Unbekannte Stellen: %s" msgid "Invalid marker format" msgstr "Ungültiges Markerformat" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Ungültige Operation: '%(op)s'. Es muss eine der folgenden Optionen verwendet " "werden: %(available)s." msgid "Invalid position for adding a location." msgstr "Ungültige Position zum Hinzufügen einer Position." msgid "Invalid position for removing a location." msgstr "Ungültige Stelle zum Entfernen einer Position." msgid "Invalid service catalog json." msgstr "Ungültige Servicekatalog-JSON." #, python-format msgid "Invalid sort direction: %s" msgstr "Ungültige Sortierrichtung: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Ungültiger Sortierschlüssel: %(sort_key)s. Es muss einer der folgenden sein: " "%(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Ungültiger Statuswert: %s" #, python-format msgid "Invalid status: %s" msgstr "Ungültiger Status: %s" #, python-format msgid "Invalid type value: %s" msgstr "Ungültiger Wert für Typ: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Ungültige Aktualisierung. Sie würde zu einer doppelten " "Metadatendefinitionseigenschaft mit demselben Namen wie %s führen" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Ungültige Aktualisierung. Sie wurde zu einem doppelten " "Metadatendefinitionsobjekt mit demselben Namen %(name)s im Namensbereich " "%(namespace_name)s führen." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Ungültige Aktualisierung. Sie wurde zu einem doppelten " "Metadatendefinitionsobjekt mit demselben Namen %(name)s im Namensbereich " "%(namespace_name)s führen." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Ungültige Aktualisierung. Sie würde zu einer doppelten " "Metadatendefinitionseigenschaft mit demselben Namen %(name)s im " "Namensbereich %(namespace_name)s führen. " #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Ungültiger Wert '%(value)s' für Parameter '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Ungültiger Wert für Option %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Ungültiger Sichtbarkeitswert: %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "" "Es ist nicht zulässig, Positionen hinzuzufügen, wenn die Positionen nicht " "sichtbar sind. " msgid "" "It's not allowed to remove image data from store if image status is not " "'active'" msgstr "" "Es ist nicht erlaubt, Abbild Daten aus dem Speicher zu löschen, wenn der " "Abbild Status nicht 'aktiv' ist" msgid "It's not allowed to remove locations if locations are invisible." 
msgstr "" "Es ist nicht zulässig, Positionen zu entfernen, wenn die Positionen nicht " "sichtbar sind. " msgid "It's not allowed to update locations if locations are invisible." msgstr "" "Es ist nicht zulässig, Positionen zu aktualisieren, wenn die Positionen " "nicht sichtbar sind. " msgid "List of strings related to the image" msgstr "Liste mit dem Abbild zugehörigen Zeichenketten" msgid "Malformed JSON in request body." msgstr "Fehlerhafte JSON in Anforderungshauptteil." msgid "Maximal age is count of days since epoch." msgstr "Das maximale Alter entspricht der Anzahl von Tagen seit der Epoche." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Das Maximum an Umleitungen (%(redirects)s) wurde überschritten." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Mitglied %(member_id)s ist für Abbild %(image_id)s doppelt vorhanden" msgid "Member can't be empty" msgstr "Mitglied darf nicht leer sein" msgid "Member to be added not specified" msgstr "Hinzuzufügendes Element nicht angegeben" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Der Metadatendefinitionsnamensbereich %(namespace)s ist geschützt und kann " "nicht gelöscht werden." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Metadatendefinitionsnamensbereich für id=%s nicht gefunden" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "Das Metadatendefinitionsobjekt %(object_name)s ist geschützt und kann nicht " "gelöscht werden." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "Metadatendefinitionsobjekt für id=%s nicht gefunden" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "Die Metadatendefinitionseigenschaft %(property_name)s ist geschützt und kann " "nicht gelöscht werden. " #, python-format msgid "Metadata definition property not found for id=%s" msgstr "Metadatendefinitionseigenschaft für id=%s nicht gefunden" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Der Ressourcentyp %(resource_type_name)s der Metadatendefinition ist ein " "Basisdaten-Systemtyp und kann nicht gelöscht werden. " #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Die Ressourcentypzuordnung %(resource_type)s der Metadatendefinition ist " "geschützt und kann nicht gelöscht werden." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Der Metadatendefinitionstag %(tag_name)s ist geschützt und kann nicht " "gelöscht werden." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Metadatendefinitionstag für id=%s nicht gefunden" #, python-format msgid "Missing required credential: %(required)s" msgstr "Erforderlicher Berechtigungsnachweis fehlt: %(required)s" msgid "Multi backend is not supported at this site." msgstr "Multi-Backend wird auf dieser Website nicht unterstützt." #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Mehrere 'image'-Serviceübereinstimmungen für Region %(region)s. 
Dies weist " "im Allgemeinen darauf hin, dass eine Region erforderlich ist und dass Sie " "keine angegeben haben." #, python-format msgid "No image found with ID %s" msgstr "Es wurde kein Abbild mit der ID %s gefunden" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Keine Position mit ID %(loc)s von Abbild %(img)s gefunden" #, python-format msgid "Not allowed to create members for image %s." msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu erstellen." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Deaktivieren des Abbild im Status '%s' nicht zulässig" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu löschen." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Es ist nicht zulässig, Schlagwörter für Abbild %s zu löschen." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Erneutes Aktivieren des Abbildes im Status '%s' nicht zulässig" #, python-format msgid "Not allowed to update members for image %s." msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu aktualisieren." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Es ist nicht zulässig, Schlagwörter für Abbild %s zu aktualisieren." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Hochladen von Abbilddaten für Abbild %(image_id)s nicht zulässig: %(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "Die Anzahl der Sortierverzeichnisse entspricht nicht der Anzahl der " "Sortierschlüssel" msgid "OVA extract is limited to admin" msgstr "OVA-Extraktion kann nur vom Administrator ausgeführt werden." msgid "Old and new sorting syntax cannot be combined" msgstr "Die alte und die neue Sortiersyntax können nicht kombiniert werden" msgid "Only images with status active can be targeted for copying" msgstr "" "Nur Abbilder mit dem Status aktiv können zum Kopieren ausgewählt werden" msgid "Only images with status active can be targeted for queueing" msgstr "" "Nur Abbilder mit dem Status aktiv können für die Warteschlange ausgewählt " "werden" msgid "Only shared images have members." msgstr "Nur öffentliche Abbilder haben Mitglieder." #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "Operation \"%s\" erfordert ein Element mit der Bezeichnung \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Operationsobjekte müssen genau ein Element mit der Bezeichnung \"add\", " "\"remove\" oder \"replace\" enthalten." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Operationsobjekte dürfen nur ein Element mit der Bezeichnung \"add\", " "\"remove\" oder \"replace\" enthalten." msgid "Operations must be JSON objects." msgstr "Operationen müssen JSON-Objekte sein." #, python-format msgid "Original locations is not empty: %s" msgstr "Originalpositionen sind nicht leer: %s" msgid "Owner can't be updated by non admin." msgstr "" "Eigner kann durch einen Benutzer, der kein Administrator ist, nicht " "aktualisiert werden." msgid "Owner of the image" msgstr "Eigentümer des Abbildes" msgid "Owner of the namespace." msgstr "Eigentümer des Namensbereichs. " msgid "Param values can't contain 4 byte unicode." msgstr "Parameterwerte dürfen kein 4-Byte-Unicode enthalten." 
#, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Zeiger `%s` enthält \"~\", das nicht Teil einer erkannten Escapezeichenfolge " "ist." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Der Zeiger `%s` enthält ein angrenzendes \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Der Zeiger `%s` enthält kein gültiges Token." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Zeiger `%s` beginnt nicht mit \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Der Zeiger `%s` endet mit einem \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "Port \"%s\" ist nicht gültig." #, python-format msgid "Process %d not running" msgstr "Prozess %d wird nicht ausgeführt" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "Eigenschaften %s müssen vor dem Speichern von Daten festgelegt werden." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "Eigenschaft %(property_name)s beginnt nicht mit dem erwarteten " "Zuordnungspräfix für Ressourcentypen '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Eigenschaft %s ist bereits vorhanden." #, python-format msgid "Property %s does not exist." msgstr "Eigenschaft %s ist nicht vorhanden." #, python-format msgid "Property %s may not be removed." msgstr "Eigenschaft %s darf nicht entfernt werden." #, python-format msgid "Property %s must be set prior to saving data." msgstr "Eigenschaft %s muss vor dem Speichern von Daten festgelegt werden." msgid "Property names can't contain 4 byte unicode." msgstr "Eigenschaftsnamen dürfen kein 4-Byte-Unicode enthalten." #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Angegebenes Objekt passt nicht zu Schema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Der angegebene Status der Task wird nicht unterstützt: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Der angegebene Typ der Task wird nicht unterstützt: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "" "Stellt eine benutzerfreundliche Beschreibung des Namensbereichs bereit. " msgid "Purge command failed, check glance-manage logs for more details." msgstr "" "Der Löschbefehl ist fehlgeschlagen. Suchen Sie in den glance-manage-" "Protokolldateien nach weiteren Details." msgid "Received invalid HTTP redirect." msgstr "Ungültige HTTP-Umleitung erhalten." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Umleitung auf %(uri)s für Autorisierung." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Registrierungsdatenbank wurde nicht ordnungsgemäß auf einem API-Server " "konfiguriert. Grund: %(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Erneutes Laden von %(serv)s nicht unterstützt" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (PID %(pid)s) wird mit Signal (%(sig)s) erneut geladen" #, python-format msgid "Removing stale pid file %s" msgstr "Veraltete PID-Datei %s wird entfernt" msgid "Request body must be a JSON array of operation objects." msgstr "" "Anforderungshauptteil muss eine JSON-Array mit Operationsobjekten sein." 
msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Antwort von Keystone enthält keinen Glance-Endpunkt." msgid "Scope of image accessibility" msgstr "Umfang der Abbildzugänglichkeit" msgid "Scope of namespace accessibility." msgstr "Umfang der Zugänglichkeit des Namensbereichs. " #, python-format msgid "Server %(serv)s is stopped" msgstr "Server %(serv)s wurde gestoppt" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Erstellung von Server-Worker fehlgeschlagen: %(reason)s." msgid "Signature verification failed" msgstr "Signaturverifizierung fehlgeschlagen" msgid "Size of image file in bytes" msgstr "Größe der Abbilddatei in Byte " msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Bei manchen Ressourcentypen sind mehrere Schlüssel/Wert-Paare pro Instanz " "zulässig. Cinder lässt z. B. Benutzer- und Abbildmetadaten für Datenträger " "zu. Nur die Metadaten der Imageeigenschaften werden von Nova ausgewertet " "(Planung oder Treiber). Diese Eigenschaft lässt zu, dass ein " "Namensbereichsziel die Mehrdeutigkeit entfernt. " msgid "Sort direction supplied was not valid." msgstr "Die angegebene Sortierrichtung war nicht gültig. " msgid "Sort key supplied was not valid." msgstr "Der angegebene Sortierschlüssel war nicht gültig. " msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Gibt das Präfix an, das für den angegebenen Ressourcentyp zu verwenden ist. " "Alle Eigenschaften im Namensbereich sollten dieses Präfix aufweisen, wenn " "sie auf den angegebenen Ressourcentyp angewendet werden. Muss " "Präfixtrennzeichen aufweisen (z. B. einen Doppelpunkt :)." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Status muss \"pending\", \"accepted\" oder \"rejected\" sein." msgid "Status not specified" msgstr "Status nicht angegeben" msgid "Status of the image" msgstr "Status des Abbildes" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Der Statusübergang von %(cur_status)s zu %(new_status)s ist nicht zulässig" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (PID %(pid)s) wird mit Signal (%(sig)s) gestoppt" msgid "Supported values for the 'container_format' image attribute" msgstr "Unterstützte Werte für das 'container_format' Abbild-Attribut" msgid "Supported values for the 'disk_format' image attribute" msgstr "Unterstützte Werte für das Abbildattribut 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Erneute Generierung wurde unterdrückt, da %(serv)s %(rsn)s war." msgid "System SIGHUP signal received." msgstr "System-SIGHUP-Signal empfangen. " #, python-format msgid "Task '%s' is required" msgstr "Task '%s' ist erforderlich" msgid "Task does not exist" msgstr "Task ist nicht vorhanden" msgid "Task failed due to Internal Error" msgstr "Task fehlgeschlagen. 
Grund: Interner Fehler" msgid "Task was not configured properly" msgstr "Die Task war nicht ordnungsgemäß konfiguriert" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Die Task mit der angegebenen ID %(task_id)s wurde nicht gefunden" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Der Filter \"changes-since\" ist bei Version 2 nicht mehr verfügbar." #, python-format msgid "The CA file you specified %s does not exist" msgstr "" "Die von Ihnen angegebene Zertifizierungsstellendatei %s ist nicht vorhanden" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "Das Objekt von Abbild %(image_id)s, das von Task %(task_id)s erstellt wurde, " "befindet sich nicht mehr in einem gültigen Status zur weiteren Verarbeitung." msgid "The Store URI was malformed." msgstr "Die Speicher-URI war fehlerhaft." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Die von Ihnen angegebene Zertifizierungsdatei %s ist nicht vorhanden" msgid "The current status of this task" msgstr "Der aktuelle Status dieser Task" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "Das Gerät, auf dem sich das Abbild-Zwischenspeicherverzeichnis " "%(image_cache_dir)s befindet, unterstützt xattr nicht. Wahrscheinlich müssen " "Sie fstab bearbeiten und die Option user_xattr zur entsprechenden Zeile für " "das Gerät, auf dem sich das Zwischenspeicherverzeichnis befindet, hinzufügen." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "Der angegebene URI ist ungültig. Geben Sie einen gültigen URI aus der " "folgenden Liste mit unterstützten URIs %(supported)s an." #, python-format msgid "" "The image %s is already present on the target, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the target server." msgstr "" "Das Abbild %s ist bereits auf dem Target vorhanden, aber bei der Überprüfung " "wurde es nicht gefunden. Dies bedeutet, dass wir nicht über die " "Berechtigungen zum Anzeigen aller Abbilder auf dem Target-Server verfügen." #, python-format msgid "The incoming image is too large: %s" msgstr "Das eingehende Abbild ist zu groß: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Die von Ihnen angegebene Schlüsseldatei %s ist nicht vorhanden" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildpositionen wurde " "überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildmitgliedern wurde für dieses " "Abbild überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. 
" "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildeigenschaften wurde " "überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildschlagwörter wurde " "überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "Die Position %(location)s ist bereits vorhanden" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Die Position weist eine ungültige ID auf: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Die Metadatendefinition %(record_type)s namens %(record_name)s wurde nicht " "gelöscht. Andere Datensätze verweisen noch darauf. " #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Der Metadatendefinitionsnamensbereich %(namespace_name)s ist bereits " "vorhanden. " #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "Das Metadatendefinitionsobjekt namens %(object_name)s wurde in Namensbereich " "%(namespace_name)s nicht gefunden. " #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "Die Metadatendefinitionseigenschaft namens %(property_name)s wurde nicht in " "Namensbereich %(namespace_name)s gefunden. " #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Die Ressourcentypzuordnung der Metadatendefinition zwischen Ressourcentyp " "%(resource_type_name)s und Namensbereich %(namespace_name)s ist bereits " "vorhanden." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "Die Ressourcentypzuordnung der Metadatendefinition zwischen Ressourcentyp " "%(resource_type_name)s und Namensbereich %(namespace_name)s wurde nicht " "gefunden." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Der Ressourcentyp %(resource_type_name)s der Metadatendefinition wurde nicht " "gefunden. " #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Der Metadatendefinitionstag namens %(name)s wurde in Namensbereich " "%(namespace_name)s nicht gefunden." msgid "The parameters required by task, JSON blob" msgstr "Die für die Task erforderlichen Parameter, JSON-Blob-Objekt" msgid "The provided image is too large." msgstr "Das angegebene Abbild ist zu groß." msgid "The request returned 500 Internal Server Error." msgstr "" "Die Anforderung hat eine Nachricht vom Typ '500 - interner Serverfehler' " "zurückgegeben." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "Die Anforderung hat eine Nachricht vom Typ '503 - Service nicht verfügbar' " "zurückgegeben. 
Dies geschieht im Allgemeinen bei einer Serviceüberlastung " "oder einem anderen temporären Ausfall." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "Die Anforderung hat eine Nachricht vom Typ '302 - Mehrere Möglichkeiten' " "zurückgegeben. Dies weist im Allgemeinen darauf hin, dass Sie bei einem " "Anfrage-URI keinen Versionsindikator angegeben haben.\n" "\n" "Nachrichtentext der zurückgegebenen Antwort:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Die Anforderung hat eine Nachricht vom Typ '413 - Anforderungsentität zu " "groß' zurückgegeben. Dies weist im Allgemeinen darauf hin, dass die " "Geschwindigkeitsbegrenzung oder ein Kontingentschwellenwert überschritten " "wurde.\n" "\n" "Der Antworttext:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Die Anforderung hat einen unerwarteten Status zurückgegeben: %(status)s.\n" "\n" "Der Antworttext:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "Das angeforderte Abbild wurde deaktiviert. Der Download von Abbilddaten ist " "nicht zulässig." msgid "The result of current task, JSON blob" msgstr "Das Ergebnis der aktuellen Task, JSON-Blob-Objekt" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "Die Größe der Daten %(image_size)s wird den Grenzwert überschreiten. " "%(remaining)s Byte verbleiben." #, python-format msgid "The specified member %s could not be found" msgstr "Das angegebene Mitglied %s konnte nicht gefunden werden" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Das angegebene Metadatenobjekt %s konnte nicht gefunden werden" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Das angegebene Metadatenschlagwort %s konnte nicht gefunden werden" #, python-format msgid "The specified namespace %s could not be found" msgstr "Der angegebene Namensbereich %s konnte nicht gefunden werden" #, python-format msgid "The specified property %s could not be found" msgstr "Die angegebene Eigenschaft %s konnte nicht gefunden werden" #, python-format msgid "The specified resource type %s could not be found " msgstr "Der angegebene Ressourcentyp %s konnte nicht gefunden werden" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Der Status der Position des gelöschten Abbildes kann nur auf " "'pending_delete' oder auf 'deleted' gesetzt werden." msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Der Status der Position des gelöschten Abbildes kann nur auf 'pending_delete' " "oder auf 'deleted' gesetzt werden." msgid "The status of this image member" msgstr "Der Status dieses Abbildmitglieds" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "Das Zielmitglied %(member_id)s ist dem Abbild %(image_id)s bereits " "zugeordnet."
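# Translator context (illustrative example, not part of the Glance source):
# an image member's status is one of the literals "pending", "accepted" or
# "rejected" quoted in the messages above, and is changed by the consuming
# project with a request such as:
#
#   PUT /v2/images/{image_id}/members/{member_id}
#   {"status": "accepted"}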
msgid "The type of task represented by this content" msgstr "Der Typ der durch diesen Inhalt dargestellten Task" msgid "The unique namespace text." msgstr "Der eindeutige Text für den Namensbereich. " msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Der benutzerfreundliche Name für den Namensbereich. Wird von der " "Benutzerschnittstelle verwendet, falls verfügbar. " msgid "There was an error configuring the client." msgstr "Fehler bei Konfiguration des Clients." msgid "There was an error connecting to a server" msgstr "Fehler beim Herstellen einer Verbindung zu einem Server." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Diese Operation ist derzeit bei Glance-Schlagwörtern nicht zulässig. Sie " "werden bei Erreichen der in der Eigenschaft 'expires_at' festgelegten Zeit " "automatisch gelöscht." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Zeit in Stunden, für die eine Task anschließend aktiv bleibt, entweder bei " "Erfolg oder bei Fehlschlag" msgid "Too few arguments." msgstr "Zu wenig Argumente" #, python-format msgid "" "Total size is %(size)d bytes (%(human_size)s) across %(img_count)d images" msgstr "Gesamtgröße ist %(size)d Byte (%(human_size)s) in %(img_count)d Images" msgid "URL to access the image file kept in external store" msgstr "URL für den Zugriff auf Abbilddatei in externem Speicher" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "PID-Datei %(pid)s kann nicht erstellt werden. Wird nicht als Root " "ausgeführt?\n" "Es wird auf eine temporäre Datei zurückgegriffen; Sie können den Dienst " "%(service)s stoppen mithilfe von:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Filtern mit dem unbekannten Operator '%s' nicht möglich." msgid "Unable to filter on a range with a non-numeric value." msgstr "Filtern in einem Bereich mit nicht numerischem Wert nicht möglich." msgid "Unable to filter on a unknown operator." msgstr "Filtern mit einem unbekannten Operator nicht möglich." msgid "Unable to filter using the specified operator." msgstr "Filtern mit dem angegebenen Operator nicht möglich." msgid "Unable to filter using the specified range." msgstr "Filtern mit dem angegebenen Bereich nicht möglich." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "'%s' kann in JSON-Schemaänderung nicht gefunden werden" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "'op' wurde in JSON-Schemaänderung nicht gefunden. Es muss eine der folgenden " "Optionen verwendet werden: %(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Grenzwert für Dateideskriptoren kann nicht erhöht werden. Wird nicht als " "Root ausgeführt?" 
#, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "%(app_name)s kann nicht aus Konfigurationsdatei %(conf_file)s geladen " "werden.\n" "Abgerufen: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Schema kann nicht geladen werden: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Konfigurationsdatei zum Einfügen für %s konnte nicht gefunden werden." #, python-format msgid "Unable to upload duplicate image data for image %(image_id)s: %(error)s" msgstr "" "Hochladen von doppelten Abbilddaten für Abbild %(image_id)s nicht möglich: " "%(error)s" msgid "Unexpected body type. Expected list/dict." msgstr "Unerwarteter Hauptteiltyp. Erwartet wurde list/dict." #, python-format msgid "Unexpected response: %s" msgstr "Unerwartete Antwort: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Unbekannte Authentifizierungsstrategie '%s'" #, python-format msgid "Unknown command: %s" msgstr "Unbekanntes Kommando: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Unbekannte Sortierrichtung; muss 'desc' oder 'asc' sein" msgid "Unrecognized JSON Schema draft version" msgstr "Unerkannte JSON-Schemaentwurfsversion" msgid "Virtual size of image in bytes" msgstr "Virtuelle Größe des Abbildes in Byte" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Es wurde 15 Sekunden auf den Abbruch von PID %(pid)s (%(file)s) gewartet; " "Abbruch" msgid "You are not authenticated." msgstr "Sie sind nicht authentifiziert." msgid "You are not authorized to complete this action." msgstr "Sie sind nicht dazu authorisiert, diese Aktion abzuschließen" #, python-format msgid "You are not authorized to lookup image %s." msgstr "Sie sind nicht berechtigt, Abbild %s zu suchen." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Sie sind nicht berechtigt, die Mitglieder des Abbild %s zu suchen." msgid "You are not permitted to create image members for the image." msgstr "Sie können keine Abbildelemente für das Abbild erstellen." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Sie können keine Abbilder erstellen, die '%s' gehören." msgid "You are not permitted to modify 'status' on this image member." msgstr "" "Sie sind nicht berechtigt, den 'status' dieses Abbild Mitglieds zu ändern." msgid "You cannot delete image member." msgstr "Sie können das Abbild Mitglied nicht löschen." msgid "You do not own this image" msgstr "Sie sind nicht Eigner dieses Images" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Sie haben sich dafür entschieden, SSL für die Verbindung zu verwenden, und " "Sie haben ein Zertifikat angegeben. Allerdings haben Sie weder einen " "key_file-Parameter angegeben noch die GLANCE_CLIENT_KEY_FILE-" "Umgebungsvariable festgelegt" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Sie haben sich dafür entschieden, SSL für die Verbindung zu verwenden, und " "Sie haben einen Schlüssel angegeben. 
Allerdings haben Sie weder einen " "cert_file-Parameter angegeben noch die GLANCE_CLIENT_CERT_FILE-" "Umgebungsvariable festgelegt" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() hat unerwartetes Schlüsselwortargument '%s' erhalten" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "Übergang von %(current)s zu %(next)s in Aktualisierung nicht möglich " "(gewünscht ist from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "Benutzerdefinierte Eigenschaften (%(props)s) stehen im Konflikt mit " "Basiseigenschaften" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Hub weder für Eventlet 'poll' noch für 'selects' ist auf dieser Plattform " "verfügbar" msgid "limit param must be an integer" msgstr "'limit'-Parameter muss eine Ganzzahl sein" msgid "limit param must be positive" msgstr "'limit'-Parameter muss positiv sein" msgid "md5 hash of image contents." msgstr "md5-Hashwert von Abbildinhalten. " #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() hat unerwartete Schlüsselwörter %s erhalten" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s kann nicht gestartet werden. Fehler: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id ist zu lang. Max. Größe %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/en_GB/0000775000175000017500000000000000000000000016520 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000020305 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/en_GB/LC_MESSAGES/glance.po0000664000175000017500000050705100000000000022106 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Abigail Brady , Bastien Nocera , 2012 # Andi Chandler , 2013 # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-08-19 09:31+0000\n" "Last-Translator: Andi Chandler \n" "Language: en_GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: English (United Kingdom)\n" #, python-format msgid "\t%s" msgstr "\t%s" msgid "" "\n" " List of enabled Image Import Methods\n" "\n" " 'glance-direct', 'copy-image' and 'web-download' are enabled by " "default.\n" " 'glance-download' is available, but requires federated deployments.\n" "\n" " Related options:\n" " * [DEFAULT]/node_staging_uri" msgstr "" "\n" " List of enabled Image Import Methods\n" "\n" " 'glance-direct', 'copy-image' and 'web-download' are enabled by " "default.\n" " 'glance-download' is available, but requires federated deployments.\n" "\n" " Related options:\n" " * [DEFAULT]/node_staging_uri" msgid "" "\n" "AES key for encrypting store location metadata.\n" "\n" "Provide a string value representing the AES cipher to use for\n" "encrypting Glance store metadata.\n" "\n" "NOTE: The AES key to use must be set to a random string of length\n" "16, 24 or 32 bytes.\n" "\n" "Possible values:\n" " * String value representing a valid AES key\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "AES key for encrypting store location metadata.\n" "\n" "Provide a string value representing the AES cipher to use for\n" "encrypting Glance store metadata.\n" "\n" "NOTE: The AES key to use must be set to a random string of length\n" "16, 24 or 32 bytes.\n" "\n" "Possible values:\n" " * String value representing a valid AES key\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Absolute path to the directory where JSON metadefs files are stored.\n" "\n" "Glance Metadata Definitions (\"metadefs\") are served from the database,\n" "but are stored in files in the JSON format. The files in this\n" "directory are used to initialize the metadefs in the database.\n" "Additionally, when metadefs are exported from the database, the files\n" "are written to this directory.\n" "\n" "NOTE: If you plan to export metadefs, make sure that this directory\n" "has write permissions set for the user being used to run the\n" "glance-api service.\n" "\n" "Possible values:\n" " * String value representing a valid absolute pathname\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Absolute path to the directory where JSON metadefs files are stored.\n" "\n" "Glance Metadata Definitions (\"metadefs\") are served from the database,\n" "but are stored in files in the JSON format. 
The files in this\n" "directory are used to initialize the metadefs in the database.\n" "Additionally, when metadefs are exported from the database, the files\n" "are written to this directory.\n" "\n" "NOTE: If you plan to export metadefs, make sure that this directory\n" "has write permissions set for the user being used to run the\n" "glance-api service.\n" "\n" "Possible values:\n" " * String value representing a valid absolute pathname\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Absolute path to the work directory to use for asynchronous\n" "task operations.\n" "\n" "The directory set here will be used to operate over images -\n" "normally before they are imported in the destination store.\n" "\n" "NOTE: When providing a value for ``work_dir``, please make sure\n" "that enough space is provided for concurrent tasks to run\n" "efficiently without running out of space.\n" "\n" "A rough estimation can be done by multiplying the number of\n" "``max_workers`` with an average image size (e.g 500MB). The image\n" "size estimation should be done based on the average size in your\n" "deployment. Note that depending on the tasks running you may need\n" "to multiply this number by some factor depending on what the task\n" "does. For example, you may want to double the available size if\n" "image conversion is enabled. All this being said, remember these\n" "are just estimations and you should do them based on the worst\n" "case scenario and be prepared to act in case they were wrong.\n" "\n" "Possible values:\n" " * String value representing the absolute path to the working\n" " directory\n" "\n" "Related Options:\n" " * None\n" "\n" msgstr "" "\n" "Absolute path to the work directory to use for asynchronous\n" "task operations.\n" "\n" "The directory set here will be used to operate over images -\n" "normally before they are imported in the destination store.\n" "\n" "NOTE: When providing a value for ``work_dir``, please make sure\n" "that enough space is provided for concurrent tasks to run\n" "efficiently without running out of space.\n" "\n" "A rough estimation can be done by multiplying the number of\n" "``max_workers`` with an average image size (e.g 500MB). The image\n" "size estimation should be done based on the average size in your\n" "deployment. Note that depending on the tasks running you may need\n" "to multiply this number by some factor depending on what the task\n" "does. For example, you may want to double the available size if\n" "image conversion is enabled. All this being said, remember these\n" "are just estimations and you should do them based on the worst\n" "case scenario and be prepared to act in case they were wrong.\n" "\n" "Possible values:\n" " * String value representing the absolute path to the working\n" " directory\n" "\n" "Related Options:\n" " * None\n" "\n" msgid "" "\n" "Allow limited access to unauthenticated users.\n" "\n" "Assign a boolean to determine API access for unauthenticated\n" "users. When set to False, the API cannot be accessed by\n" "unauthenticated users. When set to True, unauthenticated users can\n" "access the API with read-only privileges. This however only applies\n" "when using ContextMiddleware.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Allow limited access to unauthenticated users.\n" "\n" "Assign a boolean to determine API access for unauthenticated\n" "users. When set to False, the API cannot be accessed by\n" "unauthenticated users. 
When set to True, unauthenticated users can\n" "access the API with read-only privileges. This however only applies\n" "when using ContextMiddleware.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Base directory for image cache.\n" "\n" "This is the location where image data is cached and served out of. All " "cached\n" "images are stored directly under this directory. This directory also " "contains\n" "three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``.\n" "\n" "The ``incomplete`` subdirectory is the staging area for downloading images. " "An\n" "image is first downloaded to this directory. When the image download is\n" "successful it is moved to the base directory. However, if the download " "fails,\n" "the partially downloaded image file is moved to the ``invalid`` " "subdirectory.\n" "\n" "The ``queue``subdirectory is used for queuing images for download. This is\n" "used primarily by the cache-prefetcher, which can be scheduled as a " "periodic\n" "task like cache-pruner and cache-cleaner, to cache images ahead of their " "usage.\n" "Upon receiving the request to cache an image, Glance touches a file in the\n" "``queue`` directory with the image id as the file name. The cache-" "prefetcher,\n" "when running, polls for the files in ``queue`` directory and starts\n" "downloading them in the order they were created. When the download is\n" "successful, the zero-sized file is deleted from the ``queue`` directory.\n" "If the download fails, the zero-sized file remains and it'll be retried the\n" "next time cache-prefetcher runs.\n" "\n" "Possible values:\n" " * A valid path\n" "\n" "Related options:\n" " * ``image_cache_sqlite_db``\n" "\n" msgstr "" "\n" "Base directory for image cache.\n" "\n" "This is the location where image data is cached and served out of. All " "cached\n" "images are stored directly under this directory. This directory also " "contains\n" "three subdirectories, namely, ``incomplete``, ``invalid`` and ``queue``.\n" "\n" "The ``incomplete`` subdirectory is the staging area for downloading images. " "An\n" "image is first downloaded to this directory. When the image download is\n" "successful it is moved to the base directory. However, if the download " "fails,\n" "the partially downloaded image file is moved to the ``invalid`` " "subdirectory.\n" "\n" "The ``queue``subdirectory is used for queuing images for download. This is\n" "used primarily by the cache-prefetcher, which can be scheduled as a " "periodic\n" "task like cache-pruner and cache-cleaner, to cache images ahead of their " "usage.\n" "Upon receiving the request to cache an image, Glance touches a file in the\n" "``queue`` directory with the image id as the file name. The cache-" "prefetcher,\n" "when running, polls for the files in ``queue`` directory and starts\n" "downloading them in the order they were created. 
When the download is\n" "successful, the zero-sized file is deleted from the ``queue`` directory.\n" "If the download fails, the zero-sized file remains and it'll be retried the\n" "next time cache-prefetcher runs.\n" "\n" "Possible values:\n" " * A valid path\n" "\n" "Related options:\n" " * ``image_cache_sqlite_db``\n" "\n" msgid "" "\n" "Calculate hash and checksum for the image.\n" "\n" "This configuration option indicates that /v2/images/{image_id}/locations\n" "POST API will calculate hash and checksum of the image on the fly.\n" "If False it will silently ignore the hash and checksum calculation.\n" "\n" "Possible values:\n" " * True\n" " * False\n" msgstr "" "\n" "Calculate hash and checksum for the image.\n" "\n" "This configuration option indicates that /v2/images/{image_id}/locations\n" "POST API will calculate hash and checksum of the image on the fly.\n" "If False it will silently ignore the hash and checksum calculation.\n" "\n" "Possible values:\n" " * True\n" " * False\n" msgid "" "\n" "Default publisher_id for outgoing Glance notifications.\n" "\n" "This is the value that the notification driver will use to identify\n" "messages for events originating from the Glance service. Typically,\n" "this is the hostname of the instance that generated the message.\n" "\n" "Possible values:\n" " * Any reasonable instance identifier, for example: image.host1\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Default publisher_id for outgoing Glance notifications.\n" "\n" "This is the value that the notification driver will use to identify\n" "messages for events originating from the Glance service. Typically,\n" "this is the hostname of the instance that generated the message.\n" "\n" "Possible values:\n" " * Any reasonable instance identifier, for example: image.host1\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Deployment flavor to use in the server application pipeline.\n" "\n" "Provide a string value representing the appropriate deployment\n" "flavor used in the server application pipeline. This is typically\n" "the partial name of a pipeline in the paste configuration file with\n" "the service name removed.\n" "\n" "For example, if your paste section name in the paste configuration\n" "file is [pipeline:glance-api-keystone], set ``flavor`` to\n" "``keystone``.\n" "\n" "Possible values:\n" " * String value representing a partial pipeline name.\n" "\n" "Related Options:\n" " * config_file\n" "\n" msgstr "" "\n" "Deployment flavour to use in the server application pipeline.\n" "\n" "Provide a string value representing the appropriate deployment\n" "flavour used in the server application pipeline. 
This is typically\n" "the partial name of a pipeline in the paste configuration file with\n" "the service name removed.\n" "\n" "For example, if your paste section name in the paste configuration\n" "file is [pipeline:glance-api-keystone], set ``flavor`` to\n" "``keystone``.\n" "\n" "Possible values:\n" " * String value representing a partial pipeline name.\n" "\n" "Related Options:\n" " * config_file\n" "\n" msgid "" "\n" "Desired output format for image conversion plugin.\n" "\n" "Provide a valid image format to which the conversion plugin\n" "will convert the image before storing it to the back-end.\n" "\n" "Note, if the Image Conversion plugin for image import is defined, users\n" "should only upload disk formats that are supported by `quemu-img` otherwise\n" "the conversion and import will fail.\n" "\n" "Possible values:\n" " * qcow2\n" " * raw\n" " * vmdk\n" "\n" "Related Options:\n" " * disk_formats\n" msgstr "" "\n" "Desired output format for image conversion plugin.\n" "\n" "Provide a valid image format to which the conversion plugin\n" "will convert the image before storing it to the back-end.\n" "\n" "Note, if the Image Conversion plugin for image import is defined, users\n" "should only upload disk formats that are supported by `quemu-img` otherwise\n" "the conversion and import will fail.\n" "\n" "Possible values:\n" " * qcow2\n" " * raw\n" " * vmdk\n" "\n" "Related Options:\n" " * disk_formats\n" msgid "" "\n" "Dictionary contains metadata properties to be injected in image.\n" "\n" "Possible values:\n" " * Dictionary containing key/value pairs. Key characters\n" " length should be <= 255. For example: k1:v1,k2:v2\n" "\n" "\n" msgstr "" "\n" "Dictionary contains metadata properties to be injected in image.\n" "\n" "Possible values:\n" " * Dictionary containing key/value pairs. Key characters\n" " length should be <= 255. For example: k1:v1,k2:v2\n" "\n" "\n" msgid "" "\n" "Digest algorithm to use for digital signature.\n" "\n" "Provide a string value representing the digest algorithm to\n" "use for generating digital signatures. By default, ``sha256``\n" "is used.\n" "\n" "To get a list of the available algorithms supported by the version\n" "of OpenSSL on your platform, run the command:\n" "``openssl list-message-digest-algorithms``.\n" "Examples are 'sha1', 'sha256', and 'sha512'.\n" "\n" "NOTE: ``digest_algorithm`` is not related to Glance's image signing\n" "and verification. It is only used to sign the universally unique\n" "identifier (UUID) as a part of the certificate file and key file\n" "validation.\n" "\n" "Possible values:\n" " * An OpenSSL message digest algorithm identifier\n" "\n" "Relation options:\n" " * None\n" "\n" msgstr "" "\n" "Digest algorithm to use for digital signature.\n" "\n" "Provide a string value representing the digest algorithm to\n" "use for generating digital signatures. By default, ``sha256``\n" "is used.\n" "\n" "To get a list of the available algorithms supported by the version\n" "of OpenSSL on your platform, run the command:\n" "``openssl list-message-digest-algorithms``.\n" "Examples are 'sha1', 'sha256', and 'sha512'.\n" "\n" "NOTE: ``digest_algorithm`` is not related to Glance's image signing\n" "and verification. 
It is only used to sign the universally unique\n" "identifier (UUID) as a part of the certificate file and key file\n" "validation.\n" "\n" "Possible values:\n" " * An OpenSSL message digest algorithm identifier\n" "\n" "Relation options:\n" " * None\n" "\n" msgid "" "\n" "File containing the swift account(s) configurations.\n" "\n" "Include a string value representing the path to a configuration\n" "file that has references for each of the configured Swift\n" "account(s)/backing stores. By default, no file path is specified\n" "and customized Swift referencing is disabled. Configuring this option\n" "is highly recommended while using Swift storage backend for image\n" "storage as it helps avoid storage of credentials in the\n" "database.\n" "\n" "Possible values:\n" " * None\n" " * String value representing a valid configuration file path\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "File containing the Swift account(s) configurations.\n" "\n" "Include a string value representing the path to a configuration\n" "file that has references for each of the configured Swift\n" "account(s)/backing stores. By default, no file path is specified\n" "and customized Swift referencing is disabled. Configuring this option\n" "is highly recommended while using the Swift storage backend for image\n" "storage as it helps avoid storage of credentials in the\n" "database.\n" "\n" "Possible values:\n" " * None\n" " * String value representing a valid configuration file path\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Host address of the pydev server.\n" "\n" "Provide a string value representing the hostname or IP of the\n" "pydev server to use for debugging. The pydev server listens for\n" "debug connections on this address, facilitating remote debugging\n" "in Glance.\n" "\n" "Possible values:\n" " * Valid hostname\n" " * Valid IP address\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Host address of the pydev server.\n" "\n" "Provide a string value representing the hostname or IP of the\n" "pydev server to use for debugging. The pydev server listens for\n" "debug connections on this address, facilitating remote debugging\n" "in Glance.\n" "\n" "Possible values:\n" " * Valid hostname\n" " * Valid IP address\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "IP address to bind the glance servers to.\n" "\n" "Provide an IP address to bind the glance server to. The default\n" "value is ``0.0.0.0``.\n" "\n" "Edit this option to enable the server to listen on one particular\n" "IP address on the network card. This facilitates selection of a\n" "particular network interface for the server.\n" "\n" "Possible values:\n" " * A valid IPv4 address\n" " * A valid IPv6 address\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "IP address to bind the Glance servers to.\n" "\n" "Provide an IP address to bind the Glance server to. The default\n" "value is ``0.0.0.0``.\n" "\n" "Edit this option to enable the server to listen on one particular\n" "IP address on the network card. This facilitates selection of a\n" "particular network interface for the server.\n" "\n" "Possible values:\n" " * A valid IPv4 address\n" " * A valid IPv6 address\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Image import plugins to be enabled for task processing.\n" "\n" "Provide list of strings reflecting to the task Objects\n" "that should be included to the Image Import flow. 
The\n" "task objects needs to be defined in the 'glance/async/\n" "flows/plugins/*' and may be implemented by OpenStack\n" "Glance project team, deployer or 3rd party.\n" "\n" "By default no plugins are enabled and to take advantage\n" "of the plugin model the list of plugins must be set\n" "explicitly in the glance-image-import.conf file.\n" "\n" "The allowed values for this option is comma separated\n" "list of object names in between ``[`` and ``]``.\n" "\n" "Possible values:\n" " * no_op (only logs debug level message that the\n" " plugin has been executed)\n" " * Any provided Task object name to be included\n" " in to the flow.\n" msgstr "" "\n" "Image import plugins to be enabled for task processing.\n" "\n" "Provide list of strings reflecting to the task Objects\n" "that should be included to the Image Import flow. The\n" "task objects needs to be defined in the 'glance/async/\n" "flows/plugins/*' and may be implemented by OpenStack\n" "Glance project team, deployer or 3rd party.\n" "\n" "By default no plugins are enabled and to take advantage\n" "of the plugin model the list of plugins must be set\n" "explicitly in the glance-image-import.conf file.\n" "\n" "The allowed values for this option is comma separated\n" "list of object names in between ``[`` and ``]``.\n" "\n" "Possible values:\n" " * no_op (only logs debug level message that the\n" " plugin has been executed)\n" " * Any provided Task object name to be included\n" " in to the flow.\n" msgid "" "\n" "Limit the request ID length.\n" "\n" "Provide an integer value to limit the length of the request ID to\n" "the specified length. The default value is 64. Users can change this\n" "to any ineteger value between 0 and 16384 however keeping in mind that\n" "a larger value may flood the logs.\n" "\n" "Possible values:\n" " * Integer value between 0 and 16384\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Limit the request ID length.\n" "\n" "Provide an integer value to limit the length of the request ID to\n" "the specified length. The default value is 64. Users can change this\n" "to any integer value between 0 and 16384 however keeping in mind that\n" "a larger value may flood the logs.\n" "\n" "Possible values:\n" " * Integer value between 0 and 16384\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "List of notifications to be disabled.\n" "\n" "Specify a list of notifications that should not be emitted.\n" "A notification can be given either as a notification type to\n" "disable a single event notification, or as a notification group\n" "prefix to disable all event notifications within a group.\n" "\n" "Possible values:\n" " A comma-separated list of individual notification types or\n" " notification groups to be disabled. 
Currently supported groups:\n" "\n" " * image\n" " * image.member\n" " * task\n" " * metadef_namespace\n" " * metadef_object\n" " * metadef_property\n" " * metadef_resource_type\n" " * metadef_tag\n" "\n" " For a complete listing and description of each event refer to:\n" " https://docs.openstack.org/glance/latest/admin/notifications.html\n" "\n" " The values must be specified as: .\n" " For example: image.create,task.success,metadef_tag\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "List of notifications to be disabled.\n" "\n" "Specify a list of notifications that should not be emitted.\n" "A notification can be given either as a notification type to\n" "disable a single event notification or as a notification group\n" "prefix to disable all event notifications within a group.\n" "\n" "Possible values:\n" " A comma-separated list of individual notification types or\n" " notification groups to be disabled. Currently supported groups:\n" "\n" " * image\n" " * image.member\n" " * task\n" " * metadef_namespace\n" " * metadef_object\n" " * metadef_property\n" " * metadef_resource_type\n" " * metadef_tag\n" "\n" " For a complete listing and description of each event refer to:\n" " https://docs.openstack.org/glance/latest/admin/notifications.html\n" "\n" " The values must be specified as: .\n" " For example: image.create,task.success,metadef_tag\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Maximum amount of image storage per tenant.\n" "\n" "This enforces an upper limit on the cumulative storage consumed by all " "images\n" "of a tenant across all stores. This is a per-tenant limit.\n" "\n" "The default unit for this configuration option is Bytes. However, storage\n" "units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``,\n" "``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and\n" "TeraBytes respectively. Note that there should not be any space between the\n" "value and unit. Value ``0`` signifies no quota enforcement. Negative values\n" "are invalid and result in errors.\n" "\n" "This has no effect if ``use_keystone_limits`` is enabled.\n" "\n" "Possible values:\n" " * A string that is a valid concatenation of a non-negative integer\n" " representing the storage value and an optional string literal\n" " representing storage units as mentioned above.\n" "\n" "Related options:\n" " * use_keystone_limits\n" "\n" msgstr "" "\n" "Maximum amount of image storage per tenant.\n" "\n" "This enforces an upper limit on the cumulative storage consumed by all " "images\n" "of a tenant across all stores. This is a per-tenant limit.\n" "\n" "The default unit for this configuration option is Bytes. However, storage\n" "units can be specified using case-sensitive literals ``B``, ``KB``, ``MB``,\n" "``GB`` and ``TB`` representing Bytes, KiloBytes, MegaBytes, GigaBytes and\n" "TeraBytes respectively. Note that there should not be any space between the\n" "value and unit. Value ``0`` signifies no quota enforcement. 
Negative values\n" "are invalid and result in errors.\n" "\n" "This has no effect if ``use_keystone_limits`` is enabled.\n" "\n" "Possible values:\n" " * A string that is a valid concatenation of a non-negative integer\n" " representing the storage value and an optional string literal\n" " representing storage units as mentioned above.\n" "\n" "Related options:\n" " * use_keystone_limits\n" "\n" msgid "" "\n" "Maximum line size of message headers.\n" "\n" "Provide an integer value representing a length to limit the size of\n" "message headers. The default value is 16384.\n" "\n" "NOTE: ``max_header_line`` may need to be increased when using large\n" "tokens (typically those generated by the Keystone v3 API with big\n" "service catalogs). However, it is to be kept in mind that larger\n" "values for ``max_header_line`` would flood the logs.\n" "\n" "Setting ``max_header_line`` to 0 sets no limit for the line size of\n" "message headers.\n" "\n" "Possible values:\n" " * 0\n" " * Positive integer\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Maximum line size of message headers.\n" "\n" "Provide an integer value representing a length to limit the size of\n" "message headers. The default value is 16384.\n" "\n" "NOTE: ``max_header_line`` may need to be increased when using large\n" "tokens (typically those generated by the Keystone v3 API with big\n" "service catalogues). However, it is to be kept in mind that larger\n" "values for ``max_header_line`` would flood the logs.\n" "\n" "Setting ``max_header_line`` to 0 sets no limit for the line size of\n" "message headers.\n" "\n" "Possible values:\n" " * 0\n" " * Positive integer\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Maximum number of image members per image.\n" "\n" "This limits the maximum of users an image can be shared with. Any negative\n" "value is interpreted as unlimited.\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Maximum number of image members per image.\n" "\n" "This limits the maximum of users an image can be shared with. Any negative\n" "value is interpreted as unlimited.\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Maximum number of locations allowed on an image.\n" "\n" "Any negative value is interpreted as unlimited.\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Maximum number of locations allowed on an image.\n" "\n" "Any negative value is interpreted as unlimited.\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Maximum number of properties allowed on an image.\n" "\n" "This enforces an upper limit on the number of additional properties an " "image\n" "can have. Any negative value is interpreted as unlimited.\n" "\n" msgstr "" "\n" "Maximum number of properties allowed on an image.\n" "\n" "This enforces an upper limit on the number of additional properties an " "image\n" "can have. Any negative value is interpreted as unlimited.\n" "\n" msgid "" "\n" "Maximum number of results that could be returned by a request.\n" "\n" "As described in the help text of ``limit_param_default``, some\n" "requests may return multiple results. The number of results to be\n" "returned are governed either by the ``limit`` parameter in the\n" "request or the ``limit_param_default`` configuration option.\n" "The value in either case, can't be greater than the absolute maximum\n" "defined by this configuration option. 
Anything greater than this\n" "value is trimmed down to the maximum value defined here.\n" "\n" "NOTE: Setting this to a very large value may slow down database\n" " queries and increase response times. Setting this to a\n" " very low value may result in poor user experience.\n" "\n" "Possible values:\n" " * Any positive integer\n" "\n" "Related options:\n" " * limit_param_default\n" "\n" msgstr "" "\n" "Maximum number of results that could be returned by a request.\n" "\n" "As described in the help text of ``limit_param_default``, some\n" "requests may return multiple results. The number of results to be\n" "returned is governed either by the ``limit`` parameter in the\n" "request or the ``limit_param_default`` configuration option.\n" "The value in either case can't be greater than the absolute maximum\n" "defined by this configuration option. Anything greater than this\n" "value is trimmed down to the maximum value defined here.\n" "\n" "NOTE: Setting this to a very large value may slow down database\n" " queries and increase response times. Setting this to a\n" " very low value may result in poor user experience.\n" "\n" "Possible values:\n" " * Any positive integer\n" "\n" "Related options:\n" " * limit_param_default\n" "\n" msgid "" "\n" "Maximum number of tags allowed on an image.\n" "\n" "Any negative value is interpreted as unlimited.\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Maximum number of tags allowed on an image.\n" "\n" "Any negative value is interpreted as unlimited.\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Maximum size of image a user can upload in bytes.\n" "\n" "An image upload greater than the size mentioned here would result\n" "in an image creation failure. This configuration option defaults to\n" "1099511627776 bytes (1 TiB).\n" "\n" "NOTES:\n" " * This value should only be increased after careful\n" " consideration and must be set less than or equal to\n" " 8 EiB (9223372036854775808).\n" " * This value must be set with careful consideration of the\n" " backend storage capacity. Setting this to a very low value\n" " may result in a large number of image failures, and setting\n" " this to a very large value may result in faster consumption\n" " of storage. Hence, this must be set according to the nature of\n" " images created and storage capacity available.\n" "\n" "Possible values:\n" " * Any positive number less than or equal to 9223372036854775808\n" "\n" msgstr "" "\n" "Maximum size of image a user can upload in bytes.\n" "\n" "An image upload greater than the size mentioned here would result\n" "in an image creation failure. This configuration option defaults to\n" "1,099,511,627,776 bytes (1 TiB).\n" "\n" "NOTES:\n" " * This value should only be increased after careful\n" " consideration and must be set less than or equal to\n" " 8 EiB (9,223,372,036,854,775,808).\n" " * This value must be set with careful consideration of the\n" " backend storage capacity. Setting this to a very low value\n" " may result in a large number of image failures, and setting\n" " this to a very large value may result in faster consumption\n" " of storage.
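The bounds quoted above in a runnable form; the checking helper is a hypothetical sketch of the documented behaviour, not Glance's code:

    IMAGE_SIZE_CAP = 1099511627776      # documented default, 1 TiB
    ABSOLUTE_MAX = 9223372036854775808  # 8 EiB, i.e. 2 ** 63

    def check_upload_size(nbytes, size_cap=IMAGE_SIZE_CAP):
        # An upload larger than the cap results in an image creation failure.
        if nbytes > size_cap:
            raise ValueError('upload of %d bytes exceeds image_size_cap'
                             % nbytes)

    assert ABSOLUTE_MAX == 2 ** 63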
Hence, this must be set according to the nature of\n" " images created and storage capacity available.\n" "\n" "Possible values:\n" " * Any positive number less than or equal to 9,223,372,036,854,775,808\n" "\n" msgid "" "\n" "Name of the paste configuration file.\n" "\n" "Provide a string value representing the name of the paste\n" "configuration file to use for configuring pipelines for\n" "server application deployments.\n" "\n" "NOTES:\n" " * Provide the name or the path relative to the glance directory\n" " for the paste configuration file and not the absolute path.\n" " * The sample paste configuration file shipped with Glance need\n" " not be edited in most cases as it comes with ready-made\n" " pipelines for all common deployment flavors.\n" "\n" "If no value is specified for this option, the ``paste.ini`` file\n" "with the prefix of the corresponding Glance service's configuration\n" "file name will be searched for in the known configuration\n" "directories. (For example, if this option is missing from or has no\n" "value set in ``glance-api.conf``, the service will look for a file\n" "named ``glance-api-paste.ini``.) If the paste configuration file is\n" "not found, the service will not start.\n" "\n" "Possible values:\n" " * A string value representing the name of the paste configuration\n" " file.\n" "\n" "Related Options:\n" " * flavor\n" "\n" msgstr "" "\n" "Name of the paste configuration file.\n" "\n" "Provide a string value representing the name of the paste\n" "configuration file to use for configuring pipelines for\n" "server application deployments.\n" "\n" "NOTES:\n" " * Provide the name or the path relative to the glance directory\n" " for the paste configuration file and not the absolute path.\n" " * The sample paste configuration file shipped with Glance need\n" " not be edited in most cases as it comes with ready-made\n" " pipelines for all common deployment flavours.\n" "\n" "If no value is specified for this option, the ``paste.ini`` file\n" "with the prefix of the corresponding Glance service's configuration\n" "file name will be searched for in the known configuration\n" "directories. (For example, if this option is missing from or has no\n" "value set in ``glance-api.conf``, the service will look for a file\n" "named ``glance-api-paste.ini``.) If the paste configuration file is\n" "not found, the service will not start.\n" "\n" "Possible values:\n" " * A string value representing the name of the paste configuration\n" " file.\n" "\n" "Related Options:\n" " * flavor\n" "\n" msgid "" "\n" "Number of Glance worker processes to start.\n" "\n" "Provide a non-negative integer value to set the number of child\n" "process workers to service requests. By default, the number of CPUs\n" "available is set as the value for ``workers`` limited to 8. For\n" "example, if the processor count is 6, 6 workers will be used; if the\n" "processor count is 24, only 8 workers will be used.
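The defaulting rule just described amounts to "CPU count capped at 8, but only when ``workers`` is unset". A sketch (the function name is assumed):

    import os

    def default_worker_count(configured=None, default_cap=8):
        if configured is not None:
            return configured  # an explicit value is used as-is
        return min(os.cpu_count() or 1, default_cap)

    # On a 6-CPU host: 6 workers; on a 24-CPU host: 8 workers.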
The limit only\n" "applies to the default value; if 24 workers are configured, 24 are used.\n" "\n" "Each worker process is made to listen on the port set in the\n" "configuration file and contains a greenthread pool of size 1000.\n" "\n" "NOTE: Setting the number of workers to zero triggers the creation\n" "of a single API process with a greenthread pool of size 1000.\n" "\n" "Possible values:\n" " * 0\n" " * Positive integer value (typically equal to the number of CPUs)\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Number of Glance worker processes to start.\n" "\n" "Provide a non-negative integer value to set the number of child\n" "process workers to service requests. By default, the number of CPUs\n" "available is set as the value for ``workers`` limited to 8. For\n" "example, if the processor count is 6, 6 workers will be used; if the\n" "processor count is 24, only 8 workers will be used. The limit only\n" "applies to the default value; if 24 workers are configured, 24 are used.\n" "\n" "Each worker process is made to listen on the port set in the\n" "configuration file and contains a greenthread pool of size 1000.\n" "\n" "NOTE: Setting the number of workers to zero triggers the creation\n" "of a single API process with a greenthread pool of size 1000.\n" "\n" "Possible values:\n" " * 0\n" " * Positive integer value (typically equal to the number of CPUs)\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Path to the python interpreter to use when spawning external\n" "processes. If left unspecified, this will be sys.executable, which should\n" "be the same interpreter running Glance itself. However, in some situations\n" "(for example, uwsgi) sys.executable may not actually point to a python\n" "interpreter and an alternative value must be set." msgstr "" "\n" "Path to the Python interpreter to use when spawning external\n" "processes. If left unspecified, this will be sys.executable, which should\n" "be the same interpreter running Glance itself. However, in some situations\n" "(for example, uwsgi) sys.executable may not actually point to a Python\n" "interpreter and an alternative value must be set." msgid "" "\n" "Port number on which the server will listen.\n" "\n" "Provide a valid port number to bind the server's socket to. This\n" "port is then set to identify processes and forward network messages\n" "that arrive at the server. The default bind_port value for the API\n" "server is 9292 and for the registry server is 9191.\n" "\n" "Possible values:\n" " * A valid port number (0 to 65535)\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Port number on which the server will listen.\n" "\n" "Provide a valid port number to bind the server's socket to. This\n" "port is then set to identify processes and forward network messages\n" "that arrive at the server. The default bind_port value for the API\n" "server is 9292 and for the registry server is 9191.\n" "\n" "Possible values:\n" " * A valid port number (0 to 65535)\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Port number that the pydev server will listen on.\n" "\n" "Provide a port number to bind the pydev server to. The pydev\n" "process accepts debug connections on this port and facilitates\n" "remote debugging in Glance.\n" "\n" "Possible values:\n" " * A valid port number\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Port number that the pydev server will listen on.\n" "\n" "Provide a port number to bind the pydev server to.
The pydev\n" "process accepts debug connections on this port and facilitates\n" "remote debugging in Glance.\n" "\n" "Possible values:\n" " * A valid port number\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Public url endpoint to use for Glance versions response.\n" "\n" "This is the public url endpoint that will appear in the Glance\n" "\"versions\" response. If no value is specified, the endpoint that is\n" "displayed in the version's response is that of the host running the\n" "API service. Change the endpoint to represent the proxy URL if the\n" "API service is running behind a proxy. If the service is running\n" "behind a load balancer, add the load balancer's URL for this value.\n" "\n" "Possible values:\n" " * None\n" " * Proxy URL\n" " * Load balancer URL\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Public URL endpoint to use for Glance versions response.\n" "\n" "This is the public URL endpoint that will appear in the Glance\n" "\"versions\" response. If no value is specified, the endpoint that is\n" "displayed in the version's response is that of the host running the\n" "API service. Change the endpoint to represent the proxy URL if the\n" "API service is running behind a proxy. If the service is running\n" "behind a load balancer, add the load balancer's URL for this value.\n" "\n" "Possible values:\n" " * None\n" " * Proxy URL\n" " * Load balancer URL\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Reference to default Swift account/backing store parameters.\n" "\n" "Provide a string value representing a reference to the default set\n" "of parameters required for using swift account/backing store for\n" "image storage. The default reference value for this configuration\n" "option is 'ref1'. This configuration option dereferences the\n" "parameters and facilitates image storage in the Swift storage backend\n" "every time a new image is added.\n" "\n" "Possible values:\n" " * A valid string value\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Reference to default Swift account/backing store parameters.\n" "\n" "Provide a string value representing a reference to the default set\n" "of parameters required for using Swift account/backing store for\n" "image storage. The default reference value for this configuration\n" "option is 'ref1'. This configuration option dereferences the\n" "parameters and facilitates image storage in the Swift storage backend\n" "every time a new image is added.\n" "\n" "Possible values:\n" " * A valid string value\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Restore the image status from 'pending_delete' to 'active'.\n" "\n" "This option is used by an administrator to reset the image's status from\n" "'pending_delete' to 'active' when the image is deleted by mistake and the\n" "'pending delete' feature is enabled in Glance. Please make sure the\n" "glance-scrubber daemon is stopped before restoring the image to avoid image\n" "data inconsistency.\n" "\n" "Possible values:\n" " * image's uuid\n" "\n" msgstr "" "\n" "Restore the image status from 'pending_delete' to 'active'.\n" "\n" "This option is used by an administrator to reset the image's status from\n" "'pending_delete' to 'active' when the image is deleted by mistake and the\n" "'pending delete' feature is enabled in Glance.
Please make sure the\n" "glance-scrubber daemon is stopped before restoring the image to avoid image\n" "data inconsistency.\n" "\n" "Possible values:\n" " * image's uuid\n" "\n" msgid "" "\n" "Rule format for property protection.\n" "\n" "Provide the desired way to set property protection on Glance\n" "image properties. The two permissible values are ``roles``\n" "and ``policies``. The default value is ``roles``.\n" "\n" "If the value is ``roles``, the property protection file must\n" "contain a comma separated list of user roles indicating\n" "permissions for each of the CRUD operations on each property\n" "being protected. If set to ``policies``, a policy defined in\n" "policy.yaml is used to express property protections for each\n" "of the CRUD operations. Examples of how property protections\n" "are enforced based on ``roles`` or ``policies`` can be found at:\n" "https://docs.openstack.org/glance/latest/admin/property-protections." "html#examples\n" "\n" "Possible values:\n" " * roles\n" " * policies\n" "\n" "Related options:\n" " * property_protection_file\n" "\n" msgstr "" "\n" "Rule format for property protection.\n" "\n" "Provide the desired way to set property protection on Glance\n" "image properties. The two permissible values are ``roles``\n" "and ``policies``. The default value is ``roles``.\n" "\n" "If the value is ``roles``, the property protection file must\n" "contain a comma separated list of user roles indicating\n" "permissions for each of the CRUD operations on each property\n" "being protected. If set to ``policies``, a policy defined in\n" "policy.yaml is used to express property protections for each\n" "of the CRUD operations. Examples of how property protections\n" "are enforced based on ``roles`` or ``policies`` can be found at:\n" "https://docs.openstack.org/glance/latest/admin/property-protections." "html#examples\n" "\n" "Possible values:\n" " * roles\n" " * policies\n" "\n" "Related options:\n" " * property_protection_file\n" "\n" msgid "" "\n" "Run scrubber as a daemon.\n" "\n" "This boolean configuration option indicates whether scrubber should\n" "run as a long-running process that wakes up at regular intervals to\n" "scrub images. The wake up interval can be specified using the\n" "configuration option ``wakeup_time``.\n" "\n" "If this configuration option is set to ``False``, which is the\n" "default value, scrubber runs once to scrub images and exits. In this\n" "case, if the operator wishes to implement continuous scrubbing of\n" "images, scrubber needs to be scheduled as a cron job.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * ``wakeup_time``\n" "\n" msgstr "" "\n" "Run scrubber as a daemon.\n" "\n" "This boolean configuration option indicates whether scrubber should\n" "run as a long-running process that wakes up at regular intervals to\n" "scrub images. The wake up interval can be specified using the\n" "configuration option ``wakeup_time``.\n" "\n" "If this configuration option is set to ``False``, which is the\n" "default value, scrubber runs once to scrub images and exits. 
In this\n" "case, if the operator wishes to implement continuous scrubbing of\n" "images, scrubber needs to be scheduled as a cron job.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * ``wakeup_time``\n" "\n" msgid "" "\n" "Secure hashing algorithm used for computing the 'os_hash_value' property.\n" "\n" "This option configures the Glance \"multihash\", which consists of two\n" "image properties: the 'os_hash_algo' and the 'os_hash_value'. The\n" "'os_hash_algo' will be populated by the value of this configuration\n" "option, and the 'os_hash_value' will be populated by the hexdigest computed\n" "when the algorithm is applied to the uploaded or imported image data.\n" "\n" "The value must be a valid secure hash algorithm name recognized by the\n" "python 'hashlib' library. You can determine what these are by examining\n" "the 'hashlib.algorithms_available' data member of the version of the\n" "library being used in your Glance installation. For interoperability\n" "purposes, however, we recommend that you use the set of secure hash\n" "names supplied by the 'hashlib.algorithms_guaranteed' data member because\n" "those algorithms are guaranteed to be supported by the 'hashlib' library\n" "on all platforms. Thus, any image consumer using 'hashlib' locally should\n" "be able to verify the 'os_hash_value' of the image.\n" "\n" "The default value of 'sha512' is a performant secure hash algorithm.\n" "\n" "If this option is misconfigured, any attempts to store image data will " "fail.\n" "For that reason, we recommend using the default value.\n" "\n" "Possible values:\n" " * Any secure hash algorithm name recognized by the Python 'hashlib'\n" " library\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Secure hashing algorithm used for computing the 'os_hash_value' property.\n" "\n" "This option configures the Glance \"multihash\", which consists of two\n" "image properties: the 'os_hash_algo' and the 'os_hash_value'. The\n" "'os_hash_algo' will be populated by the value of this configuration\n" "option, and the 'os_hash_value' will be populated by the hexdigest computed\n" "when the algorithm is applied to the uploaded or imported image data.\n" "\n" "The value must be a valid secure hash algorithm name recognised by the\n" "python 'hashlib' library. You can determine what these are by examining\n" "the 'hashlib.algorithms_available' data member of the version of the\n" "library being used in your Glance installation. For interoperability\n" "purposes, however, we recommend that you use the set of secure hash\n" "names supplied by the 'hashlib.algorithms_guaranteed' data member because\n" "those algorithms are guaranteed to be supported by the 'hashlib' library\n" "on all platforms. Thus, any image consumer using 'hashlib' locally should\n" "be able to verify the 'os_hash_value' of the image.\n" "\n" "The default value of 'sha512' is a performant secure hash algorithm.\n" "\n" "If this option is misconfigured, any attempts to store image data will " "fail.\n" "For that reason, we recommend using the default value.\n" "\n" "Possible values:\n" " * Any secure hash algorithm name recognised by the Python 'hashlib'\n" " library\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Set keep alive option for HTTP over TCP.\n" "\n" "Provide a boolean value to determine sending of keep alive packets.\n" "If set to ``False``, the server returns the header\n" "\"Connection: close\". 
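A sketch of the multihash computation described above, using only the standard 'hashlib' API; the helper name is an assumption and Glance's real implementation differs in detail:

    import hashlib

    def compute_multihash(chunks, algo='sha512'):
        # ``algo`` plays the role of 'os_hash_algo'; the hexdigest becomes
        # 'os_hash_value'. Guaranteed names are portable across platforms.
        assert algo in hashlib.algorithms_guaranteed
        h = hashlib.new(algo)
        for chunk in chunks:
            h.update(chunk)
        return algo, h.hexdigest()

    print(compute_multihash([b'image data']))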
If set to ``True``, the server returns a\n" "\"Connection: Keep-Alive\" in its responses. This enables retention of\n" "the same TCP connection for HTTP conversations instead of opening a\n" "new one with each new request.\n" "\n" "This option must be set to ``False`` if the client socket connection\n" "needs to be closed explicitly after the response is received and\n" "read successfully by the client.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Set keep alive option for HTTP over TCP.\n" "\n" "Provide a boolean value to determine sending of keep alive packets.\n" "If set to ``False``, the server returns the header\n" "\"Connection: close\". If set to ``True``, the server returns a\n" "\"Connection: Keep-Alive\" in its responses. This enables retention of\n" "the same TCP connection for HTTP conversations instead of opening a\n" "new one with each new request.\n" "\n" "This option must be set to ``False`` if the client socket connection\n" "needs to be closed explicitly after the response is received and\n" "read successfully by the client.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Set the desired image conversion format.\n" "\n" "Provide a valid image format to which you want images to be\n" "converted before they are stored for consumption by Glance.\n" "Appropriate image format conversions are desirable for specific\n" "storage backends in order to facilitate efficient handling of\n" "bandwidth and usage of the storage infrastructure.\n" "\n" "By default, ``conversion_format`` is not set and must be set\n" "explicitly in the configuration file.\n" "\n" "The allowed values for this option are ``raw``, ``qcow2`` and\n" "``vmdk``. The ``raw`` format is the unstructured disk format and\n" "should be chosen when RBD or Ceph storage backends are used for\n" "image storage. ``qcow2`` is supported by the QEMU emulator that\n" "expands dynamically and supports Copy on Write. The ``vmdk`` is\n" "another common disk format supported by many common virtual machine\n" "monitors like VMWare Workstation.\n" "\n" "Possible values:\n" " * qcow2\n" " * raw\n" " * vmdk\n" "\n" "Related options:\n" " * disk_formats\n" "\n" msgstr "" "\n" "Set the desired image conversion format.\n" "\n" "Provide a valid image format to which you want images to be\n" "converted before they are stored for consumption by Glance.\n" "Appropriate image format conversions are desirable for specific\n" "storage backends in order to facilitate efficient handling of\n" "bandwidth and usage of the storage infrastructure.\n" "\n" "By default, ``conversion_format`` is not set and must be set\n" "explicitly in the configuration file.\n" "\n" "The allowed values for this option are ``raw``, ``qcow2`` and\n" "``vmdk``. The ``raw`` format is the unstructured disk format and\n" "should be chosen when RBD or Ceph storage backends are used for\n" "image storage. ``qcow2`` is supported by the QEMU emulator that\n" "expands dynamically and supports Copy on Write. The ``vmdk`` is\n" "another common disk format supported by many common virtual machine\n" "monitors like VMWare Workstation.\n" "\n" "Possible values:\n" " * qcow2\n" " * raw\n" " * vmdk\n" "\n" "Related options:\n" " * disk_formats\n" "\n" msgid "" "\n" "Set the number of engine executable tasks.\n" "\n" "Provide an integer value to limit the number of workers that can be\n" "instantiated on the hosts. 
In other words, this number defines the\n" "number of parallel tasks that can be executed at the same time by\n" "the taskflow engine. This value can be greater than one when the\n" "engine mode is set to parallel.\n" "\n" "Possible values:\n" " * Integer value greater than or equal to 1\n" "\n" "Related options:\n" " * engine_mode\n" "\n" msgstr "" "\n" "Set the number of engine executable tasks.\n" "\n" "Provide an integer value to limit the number of workers that can be\n" "instantiated on the hosts. In other words, this number defines the\n" "number of parallel tasks that can be executed at the same time by\n" "the taskflow engine. This value can be greater than one when the\n" "engine mode is set to parallel.\n" "\n" "Possible values:\n" " * Integer value greater than or equal to 1\n" "\n" "Related options:\n" " * engine_mode\n" "\n" msgid "" "\n" "Set the number of incoming connection requests.\n" "\n" "Provide a positive integer value to limit the number of requests in\n" "the backlog queue. The default queue size is 4096.\n" "\n" "An incoming connection to a TCP listener socket is queued before a\n" "connection can be established with the server. Setting the backlog\n" "for a TCP socket ensures a limited queue size for incoming traffic.\n" "\n" "Possible values:\n" " * Positive integer\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Set the number of incoming connection requests.\n" "\n" "Provide a positive integer value to limit the number of requests in\n" "the backlog queue. The default queue size is 4096.\n" "\n" "An incoming connection to a TCP listener socket is queued before a\n" "connection can be established with the server. Setting the backlog\n" "for a TCP socket ensures a limited queue size for incoming traffic.\n" "\n" "Possible values:\n" " * Positive integer\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Set the taskflow engine mode.\n" "\n" "Provide a string type value to set the mode in which the taskflow\n" "engine would schedule tasks to the workers on the hosts. Based on\n" "this mode, the engine executes tasks either in single or multiple\n" "threads. The possible values for this configuration option are:\n" "``serial`` and ``parallel``. When set to ``serial``, the engine runs\n" "all the tasks in a single thread which results in serial execution\n" "of tasks. Setting this to ``parallel`` makes the engine run tasks in\n" "multiple threads. This results in parallel execution of tasks.\n" "\n" "Possible values:\n" " * serial\n" " * parallel\n" "\n" "Related options:\n" " * max_workers\n" "\n" msgstr "" "\n" "Set the taskflow engine mode.\n" "\n" "Provide a string type value to set the mode in which the taskflow\n" "engine would schedule tasks to the workers on the hosts. Based on\n" "this mode, the engine executes tasks either in single or multiple\n" "threads. The possible values for this configuration option are:\n" "``serial`` and ``parallel``. When set to ``serial``, the engine runs\n" "all the tasks in a single thread which results in serial execution\n" "of tasks. Setting this to ``parallel`` makes the engine run tasks in\n" "multiple threads. This results in parallel execution of tasks.\n" "\n" "Possible values:\n" " * serial\n" " * parallel\n" "\n" "Related options:\n" " * max_workers\n" "\n" msgid "" "\n" "Set the wait time before a connection recheck.\n" "\n" "Provide a positive integer value representing time in seconds which\n" "is set as the idle wait time before a TCP keep alive packet can be\n" "sent to the host. 
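At the socket level, ``tcp_keepidle`` corresponds roughly to the following (a sketch, not Glance's server code; TCP_KEEPIDLE is Linux-specific, hence the guard):

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    if hasattr(socket, 'TCP_KEEPIDLE'):
        # Idle seconds before the first keep-alive probe; 600 is the
        # documented default.
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)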
The default value is 600 seconds.\n" "\n" "Setting ``tcp_keepidle`` helps verify at regular intervals that a\n" "connection is intact and prevents frequent TCP connection\n" "reestablishment.\n" "\n" "Possible values:\n" " * Positive integer value representing time in seconds\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Set the wait time before a connection recheck.\n" "\n" "Provide a positive integer value representing time in seconds which\n" "is set as the idle wait time before a TCP keep alive packet can be\n" "sent to the host. The default value is 600 seconds.\n" "\n" "Setting ``tcp_keepidle`` helps verify at regular intervals that a\n" "connection is intact and prevents frequent TCP connection\n" "re-establishment.\n" "\n" "Possible values:\n" " * Positive integer value representing time in seconds\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Show all image locations when returning an image.\n" "\n" "This configuration option indicates whether to show all the image\n" "locations when returning image details to the user. When multiple\n" "image locations exist for an image, the locations are ordered based\n" "on the store weightage assigned for each store indicated by the\n" "configuration option ``weight``. The image locations are shown\n" "under the image property ``locations``.\n" "\n" "NOTES:\n" " * Revealing image locations can present a GRAVE SECURITY RISK as\n" " image locations can sometimes include credentials. Hence, this\n" " is set to ``False`` by default. Set this to ``True`` with\n" " EXTREME CAUTION and ONLY IF you know what you are doing!\n" " * See https://wiki.openstack.org/wiki/OSSN/OSSN-0065 for more\n" " information.\n" " * If an operator wishes to avoid showing any image location(s)\n" " to the user, then both this option and\n" " ``show_image_direct_url`` MUST be set to ``False``.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * show_image_direct_url\n" " * weight\n" "\n" msgstr "" "\n" "Show all image locations when returning an image.\n" "\n" "This configuration option indicates whether to show all the image\n" "locations when returning image details to the user. When multiple\n" "image locations exist for an image, the locations are ordered based\n" "on the store weightage assigned for each store indicated by the\n" "configuration option ``weight``. The image locations are shown\n" "under the image property ``locations``.\n" "\n" "NOTES:\n" " * Revealing image locations can present a GRAVE SECURITY RISK as\n" " image locations can sometimes include credentials. Hence, this\n" " is set to ``False`` by default. Set this to ``True`` with\n" " EXTREME CAUTION and ONLY IF you know what you are doing!\n" " * See https://wiki.openstack.org/wiki/OSSN/OSSN-0065 for more\n" " information.\n" " * If an operator wishes to avoid showing any image location(s)\n" " to the user, then both this option and\n" " ``show_image_direct_url`` MUST be set to ``False``.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * show_image_direct_url\n" " * weight\n" "\n" msgid "" "\n" "Show direct image location when returning an image.\n" "\n" "This configuration option indicates whether to show the direct image\n" "location when returning image details to the user. The direct image\n" "location is where the image data is stored in backend storage. 
This\n" "image location is shown under the image property ``direct_url``.\n" "\n" "When multiple image locations exist for an image, the best location\n" "is displayed based on the store weightage assigned for each store\n" "indicated by the configuration option ``weight``.\n" "\n" "NOTES:\n" " * Revealing image locations can present a GRAVE SECURITY RISK as\n" " image locations can sometimes include credentials. Hence, this\n" " is set to ``False`` by default. Set this to ``True`` with\n" " EXTREME CAUTION and ONLY IF you know what you are doing!\n" " * If an operator wishes to avoid showing any image location(s)\n" " to the user, then both this option and\n" " ``show_multiple_locations`` MUST be set to ``False``.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * show_multiple_locations\n" " * weight\n" "\n" msgstr "" "\n" "Show direct image location when returning an image.\n" "\n" "This configuration option indicates whether to show the direct image\n" "location when returning image details to the user. The direct image\n" "location is where the image data is stored in backend storage. This\n" "image location is shown under the image property ``direct_url``.\n" "\n" "When multiple image locations exist for an image, the best location\n" "is displayed based on the store weightage assigned for each store\n" "indicated by the configuration option ``weight``.\n" "\n" "NOTES:\n" " * Revealing image locations can present a GRAVE SECURITY RISK as\n" " image locations can sometimes include credentials. Hence, this\n" " is set to ``False`` by default. Set this to ``True`` with\n" " EXTREME CAUTION and ONLY IF you know what you are doing!\n" " * If an operator wishes to avoid showing any image location(s)\n" " to the user, then both this option and\n" " ``show_multiple_locations`` MUST be set to ``False``.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * show_multiple_locations\n" " * weight\n" "\n" msgid "" "\n" "Specify metadata prefix to be set on the target image when using\n" "glance-download. All other properties coming from the source image won't be " "set\n" "on the target image. If specified metadata does not exist on the source " "image\n" "it won't be set on the target image. Note you can't set the os_glance " "prefix\n" "as it is reserved by glance, so the related properties won't be set on the\n" "target image.\n" "\n" "Possible values:\n" " * List containing extra_properties prefixes: ['os_', 'architecture']\n" "\n" msgstr "" "\n" "Specify metadata prefix to be set on the target image when using\n" "glance-download. All other properties coming from the source image won't be " "set\n" "on the target image. If specified metadata does not exist on the source " "image\n" "it won't be set on the target image. Note you can't set the os_glance " "prefix\n" "as it is reserved by Glance, so the related properties won't be set on the\n" "target image.\n" "\n" "Possible values:\n" " * List containing extra_properties prefixes: ['os_', 'architecture']\n" "\n" msgid "" "\n" "Specify name of user roles to be ignored for injecting metadata\n" "properties in the image.\n" "\n" "Possible values:\n" " * List containing user roles. For example: [admin,member]\n" "\n" msgstr "" "\n" "Specify name of user roles to be ignored for injecting metadata\n" "properties in the image.\n" "\n" "Possible values:\n" " * List containing user roles. 
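A sketch of the prefix filtering that the glance-download option above describes (hypothetical helper; the prefix list mirrors the example given):

    def filter_source_properties(source_props, prefixes=('os_', 'architecture')):
        # Keep only properties matching a configured prefix; the reserved
        # os_glance properties are never copied to the target image.
        return {k: v for k, v in source_props.items()
                if k.startswith(prefixes) and not k.startswith('os_glance')}

    props = {'os_distro': 'ubuntu', 'os_glance_import_task': 'x', 'foo': 1}
    assert filter_source_properties(props) == {'os_distro': 'ubuntu'}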
For example: [admin,member]\n" "\n" msgid "" "\n" "Task executor to be used to run task scripts.\n" "\n" "Provide a string value representing the executor to use for task\n" "executions. By default, ``TaskFlow`` executor is used.\n" "\n" "``TaskFlow`` helps make task executions easy, consistent, scalable\n" "and reliable. It also enables creation of lightweight task objects\n" "and/or functions that are combined together into flows in a\n" "declarative manner.\n" "\n" "Possible values:\n" " * taskflow\n" "\n" "Related Options:\n" " * None\n" "\n" msgstr "" "\n" "Task executor to be used to run task scripts.\n" "\n" "Provide a string value representing the executor to use for task\n" "executions. By default, ``TaskFlow`` executor is used.\n" "\n" "``TaskFlow`` helps make task executions easy, consistent, scalable\n" "and reliable. It also enables creation of lightweight task objects\n" "and/or functions that are combined together into flows in a\n" "declarative manner.\n" "\n" "Possible values:\n" " * taskflow\n" "\n" "Related Options:\n" " * None\n" "\n" msgid "" "\n" "The URL that provides the location where the temporary data will be stored\n" "\n" "This option is for Glance internal use only. Glance will save the\n" "image data uploaded by the user to the 'staging' endpoint during the\n" "image import process.\n" "\n" "This option does not change the 'staging' API endpoint by any means.\n" "\n" "NOTE: Using the same path as [task]/work_dir is discouraged.\n" "\n" "NOTE: 'file://' is the only scheme the\n" "api_image_import flow will support for now.\n" "\n" "NOTE: The staging path must be on a shared filesystem available to all\n" "Glance API nodes.\n" "\n" "Possible values:\n" " * String starting with 'file://' followed by absolute FS path\n" "\n" "Related options:\n" " * [task]/work_dir\n" "\n" msgstr "" "\n" "The URL that provides the location where the temporary data will be stored\n" "\n" "This option is for Glance internal use only. Glance will save the\n" "image data uploaded by the user to the 'staging' endpoint during the\n" "image import process.\n" "\n" "This option does not change the 'staging' API endpoint by any means.\n" "\n" "NOTE: Using the same path as [task]/work_dir is discouraged.\n" "\n" "NOTE: 'file://' is the only scheme the\n" "api_image_import flow will support for now.\n" "\n" "NOTE: The staging path must be on a shared filesystem available to all\n" "Glance API nodes.\n" "\n" "Possible values:\n" " * String starting with 'file://' followed by absolute FS path\n" "\n" "Related options:\n" " * [task]/work_dir\n" "\n" msgid "" "\n" "The URL to this worker.\n" "\n" "If this is set, other glance workers will know how to contact this one\n" "directly if needed. For image import, a single worker stages the image\n" "and other workers need to be able to proxy the import request to the\n" "right one.\n" "\n" "If unset, this will be considered to be `public_endpoint`, which\n" "normally would be set to the same value on all workers, effectively\n" "disabling the proxying behavior.\n" "\n" "Possible values:\n" " * A URL by which this worker is reachable from other workers\n" "\n" "Related options:\n" " * public_endpoint\n" "\n" msgstr "" "\n" "The URL to this worker.\n" "\n" "If this is set, other glance workers will know how to contact this one\n" "directly if needed.
For image import, a single worker stages the image\n" "and other workers need to be able to proxy the import request to the\n" "right one.\n" "\n" "If unset, this will be considered to be `public_endpoint`, which\n" "normally would be set to the same value on all workers, effectively\n" "disabling the proxying behaviour.\n" "\n" "Possible values:\n" " * A URL by which this worker is reachable from other workers\n" "\n" "Related options:\n" " * public_endpoint\n" "\n" msgid "" "\n" "The amount of time, in seconds, an incomplete image remains in the cache.\n" "\n" "Incomplete images are images for which download is in progress. Please see " "the\n" "description of configuration option ``image_cache_dir`` for more detail.\n" "Sometimes, due to various reasons, it is possible the download may hang and\n" "the incompletely downloaded image remains in the ``incomplete`` directory.\n" "This configuration option sets a time limit on how long the incomplete " "images\n" "should remain in the ``incomplete`` directory before they are cleaned up.\n" "Once an incomplete image spends more time than is specified here, it'll be\n" "removed by cache-cleaner on its next run.\n" "\n" "It is recommended to run cache-cleaner as a periodic task on the Glance API\n" "nodes to keep the incomplete images from occupying disk space.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "The amount of time, in seconds, an incomplete image remains in the cache.\n" "\n" "Incomplete images are images for which download is in progress. Please see " "the\n" "description of configuration option ``image_cache_dir`` for more detail.\n" "Sometimes, due to various reasons, it is possible the download may hang and\n" "the incompletely downloaded image remains in the ``incomplete`` directory.\n" "This configuration option sets a time limit on how long the incomplete " "images\n" "should remain in the ``incomplete`` directory before they are cleaned up.\n" "Once an incomplete image spends more time than is specified here, it'll be\n" "removed by cache-cleaner on its next run.\n" "\n" "It is recommended to run cache-cleaner as a periodic task on the Glance API\n" "nodes to keep the incomplete images from occupying disk space.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "The amount of time, in seconds, to delay image scrubbing.\n" "\n" "When delayed delete is turned on, an image is put into ``pending_delete``\n" "state upon deletion until the scrubber deletes its image data. Typically, " "soon\n" "after the image is put into ``pending_delete`` state, it is available for\n" "scrubbing. However, scrubbing can be delayed until a later point using this\n" "configuration option. This option denotes the time period an image spends " "in\n" "``pending_delete`` state before it is available for scrubbing.\n" "\n" "It is important to realize that this has storage implications. The larger " "the\n" "``scrub_time``, the longer the time to reclaim backend storage from deleted\n" "images.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * ``delayed_delete``\n" "\n" msgstr "" "\n" "The amount of time, in seconds, to delay image scrubbing.\n" "\n" "When delayed delete is turned on, an image is put into ``pending_delete``\n" "state upon deletion until the scrubber deletes its image data. 
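A sketch of the clean-up pass described for incomplete cache entries; the directory layout and the stall-time argument are assumptions, and Glance's actual cache-cleaner is more involved:

    import os
    import time

    def clean_incomplete(incomplete_dir, stall_time):
        # Remove partial downloads that have been idle longer than
        # ``stall_time`` seconds.
        now = time.time()
        for name in os.listdir(incomplete_dir):
            path = os.path.join(incomplete_dir, name)
            if now - os.path.getmtime(path) > stall_time:
                os.unlink(path)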
Typically, " "soon\n" "after the image is put into ``pending_delete`` state, it is available for\n" "scrubbing. However, scrubbing can be delayed until a later point using this\n" "configuration option. This option denotes the time period an image spends " "in\n" "``pending_delete`` state before it is available for scrubbing.\n" "\n" "It is important to realise that this has storage implications. The larger " "the\n" "``scrub_time``, the longer the time to reclaim backend storage from deleted\n" "images.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * ``delayed_delete``\n" "\n" msgid "" "\n" "The default number of results to return for a request.\n" "\n" "Responses to certain API requests, like list images, may return\n" "multiple items. The number of results returned can be explicitly\n" "controlled by specifying the ``limit`` parameter in the API request.\n" "However, if a ``limit`` parameter is not specified, this\n" "configuration value will be used as the default number of results to\n" "be returned for any API request.\n" "\n" "NOTES:\n" " * The value of this configuration option may not be greater than\n" " the value specified by ``api_limit_max``.\n" " * Setting this to a very large value may slow down database\n" " queries and increase response times. Setting this to a\n" " very low value may result in poor user experience.\n" "\n" "Possible values:\n" " * Any positive integer\n" "\n" "Related options:\n" " * api_limit_max\n" "\n" msgstr "" "\n" "The default number of results to return for a request.\n" "\n" "Responses to certain API requests, like list images, may return\n" "multiple items. The number of results returned can be explicitly\n" "controlled by specifying the ``limit`` parameter in the API request.\n" "However, if a ``limit`` parameter is not specified, this\n" "configuration value will be used as the default number of results to\n" "be returned for any API request.\n" "\n" "NOTES:\n" " * The value of this configuration option may not be greater than\n" " the value specified by ``api_limit_max``.\n" " * Setting this to a very large value may slow down database\n" " queries and increase response times. Setting this to a\n" " very low value may result in poor user experience.\n" "\n" "Possible values:\n" " * Any positive integer\n" "\n" "Related options:\n" " * api_limit_max\n" "\n" msgid "" "\n" "The driver to use for image cache management.\n" "\n" "This configuration option provides the flexibility to choose between the\n" "different image-cache drivers available. An image-cache driver is " "responsible\n" "for providing the essential functions of image-cache, like writing images " "to\n" "and reading images from the cache, tracking the age and usage of cached\n" "images, providing a list of cached images, fetching the size of the cache,\n" "queueing images for caching, and cleaning up the cache.\n" "\n" "The essential functions of a driver are defined in the base class\n" "``glance.image_cache.drivers.base.Driver``. All image-cache drivers " "(existing\n" "and prospective) must implement this interface. Currently available drivers\n" "are ``centralized_db``, ``sqlite`` and ``xattr``. These drivers primarily " "differ in the way they\n" "store the information about cached images:\n" "\n" "* The ``centralized_db`` driver uses a central database (which will be " "common\n" " for all glance nodes) to track the usage of cached images.\n" "* The ``sqlite`` (deprecated) driver uses a sqlite database (which sits on\n" " every glance node locally) to track the usage of cached images.\n" "* The ``xattr`` driver uses the extended attributes of files to store this\n" " information. It also requires a filesystem that sets ``atime`` on the " "files\n" " when accessed.\n" "\n" "Deprecation warning:\n" " * As a centralized database will now be used for image cache management, " "the\n" " use of `sqlite` database and driver will be dropped from the 'E' (2025.1)\n" " development cycle.\n" "\n" "Possible values:\n" " * centralized_db\n" " * sqlite\n" " * xattr\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "The driver to use for image cache management.\n" "\n" "This configuration option provides the flexibility to choose between the\n" "different image-cache drivers available. An image-cache driver is " "responsible\n" "for providing the essential functions of image-cache, like writing images " "to\n" "and reading images from the cache, tracking the age and usage of cached\n" "images, providing a list of cached images, fetching the size of the cache,\n" "queueing images for caching, and cleaning up the cache.\n" "\n" "The essential functions of a driver are defined in the base class\n" "``glance.image_cache.drivers.base.Driver``. All image-cache drivers " "(existing\n" "and prospective) must implement this interface. Currently available drivers\n" "are ``centralized_db``, ``sqlite`` and ``xattr``. These drivers primarily " "differ in the way they\n" "store the information about cached images:\n" "\n" "* The ``centralized_db`` driver uses a central database (which will be " "common\n" " for all glance nodes) to track the usage of cached images.\n" "* The ``sqlite`` (deprecated) driver uses a SQLite database (which sits on\n" " every glance node locally) to track the usage of cached images.\n" "* The ``xattr`` driver uses the extended attributes of files to store this\n" " information. It also requires a filesystem that sets ``atime`` on the " "files\n" " when accessed.\n" "\n" "Deprecation warning:\n" " * As a centralized database will now be used for image cache management, " "the\n" " use of `sqlite` database and driver will be dropped from the 'E' (2025.1)\n" " development cycle.\n" "\n" "Possible values:\n" " * centralized_db\n" " * sqlite\n" " * xattr\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "The location of the property protection file.\n" "\n" "Provide a valid path to the property protection file which contains\n" "the rules for property protections and the roles/policies associated\n" "with them.\n" "\n" "A property protection file, when set, restricts the Glance image\n" "properties to be created, read, updated and/or deleted by a specific\n" "set of users that are identified by either roles or policies.\n" "If this configuration option is not set, by default, property\n" "protections won't be enforced.
If a value is specified and the file\n" "is not found, the glance-api service will fail to start.\n" "More information on property protections can be found at:\n" "https://docs.openstack.org/glance/latest/admin/property-protections.html\n" "\n" "Possible values:\n" " * Empty string\n" " * Valid path to the property protection configuration file\n" "\n" "Related options:\n" " * property_protection_rule_format\n" "\n" msgstr "" "\n" "The location of the property protection file.\n" "\n" "Provide a valid path to the property protection file which contains\n" "the rules for property protections and the roles/policies associated\n" "with them.\n" "\n" "A property protection file, when set, restricts the Glance image\n" "properties to be created, read, updated and/or deleted by a specific\n" "set of users that are identified by either roles or policies.\n" "If this configuration option is not set, by default, property\n" "protections won't be enforced. If a value is specified and the file\n" "is not found, the glance-api service will fail to start.\n" "More information on property protections can be found at:\n" "https://docs.openstack.org/glance/latest/admin/property-protections.html\n" "\n" "Possible values:\n" " * Empty string\n" " * Valid path to the property protection configuration file\n" "\n" "Related options:\n" " * property_protection_rule_format\n" "\n" msgid "" "\n" "The number of threads (per worker process) in the pool for processing\n" "asynchronous tasks. This controls how many asynchronous tasks (i.e. for\n" "image interoperable import) each worker can run at a time. If this is\n" "too large, you *may* have increased memory footprint per worker and/or you\n" "may overwhelm other system resources such as disk or outbound network\n" "bandwidth. If this is too small, image import requests will have to wait\n" "until a thread becomes available to begin processing." msgstr "" "\n" "The number of threads (per worker process) in the pool for processing\n" "asynchronous tasks. This controls how many asynchronous tasks (i.e. for\n" "image interoperable import) each worker can run at a time. If this is\n" "too large, you *may* have increased memory footprint per worker and/or you\n" "may overwhelm other system resources such as disk or outbound network\n" "bandwidth. If this is too small, image import requests will have to wait\n" "until a thread becomes available to begin processing." msgid "" "\n" "The number of times to retry when any operation fails.\n" msgstr "" "\n" "The number of times to retry when any operation fails.\n" msgid "" "\n" "The relative path to sqlite file database that will be used for image cache\n" "management.\n" "\n" "This is a relative path to the sqlite file database that tracks the age and\n" "usage statistics of image cache. The path is relative to image cache base\n" "directory, specified by the configuration option ``image_cache_dir``.\n" "\n" "This is a lightweight database with just one table.\n" "\n" "Possible values:\n" " * A valid relative path to sqlite file database\n" "\n" "Related options:\n" " * ``image_cache_dir``\n" "\n" msgstr "" "\n" "The relative path to sqlite file database that will be used for image cache\n" "management.\n" "\n" "This is a relative path to the sqlite file database that tracks the age and\n" "usage statistics of image cache. 
The path is relative to image cache base\n" "directory, specified by the configuration option ``image_cache_dir``.\n" "\n" "This is a lightweight database with just one table.\n" "\n" "Possible values:\n" " * A valid relative path to sqlite file database\n" "\n" "Related options:\n" " * ``image_cache_dir``\n" "\n" msgid "" "\n" "The size of thread pool to be used for scrubbing images.\n" "\n" "When there are a large number of images to scrub, it is beneficial to scrub\n" "images in parallel so that the scrub queue stays in control and the backend\n" "storage is reclaimed in a timely fashion. This configuration option denotes\n" "the maximum number of images to be scrubbed in parallel. The default value " "is\n" "one, which signifies serial scrubbing. Any value above one indicates " "parallel\n" "scrubbing.\n" "\n" "Possible values:\n" " * Any non-zero positive integer\n" "\n" "Related options:\n" " * ``delayed_delete``\n" "\n" msgstr "" "\n" "The size of thread pool to be used for scrubbing images.\n" "\n" "When there are a large number of images to scrub, it is beneficial to scrub\n" "images in parallel so that the scrub queue stays in control and the backend\n" "storage is reclaimed in a timely fashion. This configuration option denotes\n" "the maximum number of images to be scrubbed in parallel. The default value " "is\n" "one, which signifies serial scrubbing. Any value above one indicates " "parallel\n" "scrubbing.\n" "\n" "Possible values:\n" " * Any non-zero positive integer\n" "\n" "Related options:\n" " * ``delayed_delete``\n" "\n" msgid "" "\n" "The upper limit on cache size, in bytes, after which the cache-pruner " "cleans\n" "up the image cache.\n" "\n" "NOTE: This is just a threshold for cache-pruner to act upon. It is NOT a\n" "hard limit beyond which the image cache would never grow. In fact, " "depending\n" "on how often the cache-pruner runs and how quickly the cache fills, the " "image\n" "cache can far exceed the size specified here very easily. Hence, care must " "be\n" "taken to appropriately schedule the cache-pruner and in setting this limit.\n" "\n" "Glance caches an image when it is downloaded. Consequently, the size of the\n" "image cache grows over time as the number of downloads increases. To keep " "the\n" "cache size from becoming unmanageable, it is recommended to run the\n" "cache-pruner as a periodic task. When the cache pruner is kicked off, it\n" "compares the current size of image cache and triggers a cleanup if the " "image\n" "cache grew beyond the size specified here. After the cleanup, the size of\n" "cache is less than or equal to size specified here.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "The upper limit on cache size, in bytes, after which the cache-pruner " "cleans\n" "up the image cache.\n" "\n" "NOTE: This is just a threshold for cache-pruner to act upon. It is NOT a\n" "hard limit beyond which the image cache would never grow. In fact, " "depending\n" "on how often the cache-pruner runs and how quickly the cache fills, the " "image\n" "cache can far exceed the size specified here very easily. Hence, care must " "be\n" "taken to appropriately schedule the cache-pruner and in setting this limit.\n" "\n" "Glance caches an image when it is downloaded. Consequently, the size of the\n" "image cache grows over time as the number of downloads increases. 
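The pruning behaviour described here, as a sketch: the configured size is only a trigger, and eviction (least recently used first is an assumption) continues until the cache fits again:

    def prune(entries, image_cache_max_size):
        # ``entries``: assumed list of (path, size_bytes, last_access) tuples.
        total = sum(size for _, size, _ in entries)
        for path, size, _ in sorted(entries, key=lambda e: e[2]):
            if total <= image_cache_max_size:
                break  # nothing to do once below the trigger
            total -= size  # a real pruner would also unlink ``path``
        return total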
To keep " "the\n" "cache size from becoming unmanageable, it is recommended to run the\n" "cache-pruner as a periodic task. When the cache pruner is kicked off, it\n" "compares the current size of image cache and triggers a clean-up if the " "image\n" "cache grew beyond the size specified here. After the clean-up, the size of " "the\n" "cache is less than or equal to the size specified here.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "This option doesn't serve the purpose of encrypting location metadata;\n" "it encrypts the location URL only for specific APIs. Also, enabling it\n" "during an upgrade may disrupt existing deployments, as it does not\n" "support/provide a db upgrade script to encrypt existing location URLs.\n" "Moreover, its functionality for encrypting location URLs is inconsistent,\n" "which results in download failures.\n" msgstr "" "\n" "This option doesn't serve the purpose of encrypting location metadata;\n" "it encrypts the location URL only for specific APIs. Also, enabling it\n" "during an upgrade may disrupt existing deployments, as it does not\n" "support/provide a db upgrade script to encrypt existing location URLs.\n" "Moreover, its functionality for encrypting location URLs is inconsistent,\n" "which results in download failures.\n" msgid "" "\n" "This option has been removed in Wallaby. Because there is no migration " "path\n" "for installations that had owner_is_tenant==False, we have defined this " "option\n" "so that the code can probe the config file and refuse to start the API " "service\n" "if the deployment has been using that setting.\n" msgstr "" "\n" "This option has been removed in Wallaby. Because there is no migration " "path\n" "for installations that had owner_is_tenant==False, we have defined this " "option\n" "so that the code can probe the config file and refuse to start the API " "service\n" "if the deployment has been using that setting.\n" msgid "" "\n" "This option has had no effect since the removal of native SSL support.\n" msgstr "" "\n" "This option has had no effect since the removal of native SSL support.\n" msgid "" "\n" "Time interval, in seconds, between scrubber runs in daemon mode.\n" "\n" "Scrubber can be run either as a cron job or daemon. When run as a daemon, " "this\n" "configuration time specifies the time period between two runs. When the\n" "scrubber wakes up, it fetches and scrubs all ``pending_delete`` images that\n" "are available for scrubbing after taking ``scrub_time`` into consideration.\n" "\n" "If the wakeup time is set to a large number, there may be a large number of\n" "images to be scrubbed for each run.
Also, this impacts how quickly the " "backend\n" "storage is reclaimed.\n" "\n" "Possible values:\n" " * Any non-negative integer\n" "\n" "Related options:\n" " * ``daemon``\n" " * ``delayed_delete``\n" "\n" msgid "" "\n" "Timeout for client connections' socket operations.\n" "\n" "Provide a valid integer value representing time in seconds to set\n" "the period of wait before an incoming connection can be closed. The\n" "default value is 900 seconds.\n" "\n" "The value zero implies wait forever.\n" "\n" "Possible values:\n" " * Zero\n" " * Positive integer\n" "\n" "Related options:\n" " * None\n" "\n" msgstr "" "\n" "Timeout for client connections' socket operations.\n" "\n" "Provide a valid integer value representing time in seconds to set\n" "the period of wait before an incoming connection can be closed. The\n" "default value is 900 seconds.\n" "\n" "The value zero implies wait forever.\n" "\n" "Possible values:\n" " * Zero\n" " * Positive integer\n" "\n" "Related options:\n" " * None\n" "\n" msgid "" "\n" "Turn on/off delayed delete.\n" "\n" "Typically when an image is deleted, the ``glance-api`` service puts the " "image\n" "into ``deleted`` state and deletes its data at the same time. Delayed " "delete\n" "is a feature in Glance that delays the actual deletion of image data until " "a\n" "later point in time (as determined by the configuration option " "``scrub_time``).\n" "When delayed delete is turned on, the ``glance-api`` service puts the image\n" "into ``pending_delete`` state upon deletion and leaves the image data in " "the\n" "storage backend for the image scrubber to delete at a later time. The image\n" "scrubber will move the image into ``deleted`` state upon successful " "deletion\n" "of image data.\n" "\n" "NOTE: When delayed delete is turned on, image scrubber MUST be running as a\n" "periodic task to prevent the backend storage from filling up with undesired\n" "usage.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * ``scrub_time``\n" " * ``wakeup_time``\n" " * ``scrub_pool_size``\n" "\n" msgstr "" "\n" "Turn on/off delayed delete.\n" "\n" "Typically when an image is deleted, the ``glance-api`` service puts the " "image\n" "into ``deleted`` state and deletes its data at the same time. Delayed " "delete\n" "is a feature in Glance that delays the actual deletion of image data until " "a\n" "later point in time (as determined by the configuration option " "``scrub_time``).\n" "When delayed delete is turned on, the ``glance-api`` service puts the image\n" "into ``pending_delete`` state upon deletion and leaves the image data in " "the\n" "storage backend for the image scrubber to delete at a later time. The image\n" "scrubber will move the image into ``deleted`` state upon successful " "deletion\n" "of image data.\n" "\n" "NOTE: When delayed delete is turned on, image scrubber MUST be running as a\n" "periodic task to prevent the backend storage from filling up with undesired\n" "usage.\n" "\n" "Possible values:\n" " * True\n" " * False\n" "\n" "Related options:\n" " * ``scrub_time``\n" " * ``wakeup_time``\n" " * ``scrub_pool_size``\n" "\n" msgid "" "\n" "Utilize per-tenant resource limits registered in Keystone.\n" "\n" "Enabling this feature will cause Glance to retrieve limits set in keystone\n" "for resource consumption and enforce them against API users. 
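In daemon mode, ``daemon`` and ``wakeup_time`` combine into a simple wake-sleep loop; a sketch in which the callable stands in for one scrub pass:

    import time

    def scrubber_daemon(scrub_once, wakeup_time):
        while True:
            scrub_once()  # scrubs every eligible ``pending_delete`` image
            time.sleep(wakeup_time)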
Before turning\n" "this on, the limits need to be registered in Keystone or all quotas will be\n" "considered to be zero, and thus reject all new resource requests.\n" "\n" "These per-tenant resource limits are independent from the static\n" "global ones configured in this config file. If this is enabled, the\n" "relevant static global limits will be ignored.\n" msgstr "" "\n" "Utilize per-tenant resource limits registered in Keystone.\n" "\n" "Enabling this feature will cause Glance to retrieve limits set in Keystone\n" "for resource consumption and enforce them against API users. Before turning\n" "this on, the limits need to be registered in Keystone or all quotas will be\n" "considered to be zero, and thus all new resource requests will be rejected.\n" "\n" "These per-tenant resource limits are independent from the static\n" "global ones configured in this config file. If this is enabled, the\n" "relevant static global limits will be ignored.\n" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "%(cls)s exception was raised in the last rpc call: %(val)s" #, python-format msgid "%(interface)s glance endpoint not found for region %(region)s" msgstr "%(interface)s glance endpoint not found for region %(region)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s not found in the member list of the image %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) is running..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s appears to already be running: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s failed but since all_stores_must_succeed is set " "to false, continue." msgstr "" "%(task_id)s of %(task_type)s failed but since all_stores_must_succeed is set " "to false, continue." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Value of " "node_staging_uri must be in format 'file://'" msgstr "" "%(task_id)s of %(task_type)s not configured properly. Value of " "node_staging_uri must be in format 'file://'" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)sing %(serv)s with %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s can't contain 4 byte Unicode characters." 
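# NOTE: a minimal glance-api.conf sketch for the delayed-delete/scrubber and
# per-tenant limit options described in the help texts above. The names
# delayed_delete, scrub_time, wakeup_time and daemon are taken from those
# texts; use_keystone_limits is an assumption based on recent Glance
# releases, and all values are illustrative only.
#
#   [DEFAULT]
#   delayed_delete = True
#   # images stay in pending_delete at least this many seconds before scrubbing
#   scrub_time = 86400
#   # daemon-mode scrubber wakes up every wakeup_time seconds to fetch scrub jobs
#   daemon = True
#   wakeup_time = 300
#   # assumed option name for the per-tenant Keystone limits feature
#   use_keystone_limits = True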
#, python-format msgid "%s is already set with a different value" msgstr "%s is already set with a different value" #, python-format msgid "%s is already stopped" msgstr "%s is already stopped" #, python-format msgid "%s is stopped" msgstr "%s is stopped" #, python-format msgid "%s of uploaded data is different from current value set on the image." msgstr "%s of uploaded data is different from current value set on the image." #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d." msgstr "'%(param)s' value out of range, must not exceed %(max)d." msgid "'all_stores' must be boolean value only" msgstr "'all_stores' must be boolean value only" msgid "'all_stores_must_succeed' must be boolean value only" msgstr "'all_stores_must_succeed' must be boolean value only" msgid "'container_format' needs to be set before import" msgstr "'container_format' needs to be set before import" msgid "'disk_format' needs to be set before import" msgstr "'disk_format' needs to be set before import" msgid "'glance-direct' method is not available at this site." msgstr "'glance-direct' method is not available at this site." msgid "'glance_image_id' needs to be set for glance-download import method" msgstr "'glance_image_id' needs to be set for glance-download import method" msgid "'glance_region' needs to be set for glance-download import method" msgstr "'glance_region' needs to be set for glance-download import method" msgid "'node_staging_uri' is not set correctly. Could not load staging store." msgstr "'node_staging_uri' is not set correctly. Could not load staging store." msgid "" "'os_glance_' prefix should not be used in enabled_backends config option. It " "is reserved for internal use only." msgstr "" "'os_glance_' prefix should not be used in enabled_backends config option. It " "is reserved for internal use only." msgid "" "'worker_self_reference_url' needs to be set if `centralized_db` is defined " "as cache driver for image_cache_driver config option." msgstr "" "'worker_self_reference_url' needs to be set if `centralized_db` is defined " "as a cache driver for image_cache_driver config option." msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "Keystone authentication strategy is enabled\n" msgid "A body is not expected with this request." msgstr "A body is not expected with this request." msgid "" "A list of strings describing allowed VMDK 'create-type' subformats that will " "be allowed. This is recommended to only include single-file-with-sparse-" "header variants to avoid potential host file exposure due to processing " "named extents. If this list is empty, then no VDMK image types allowed. Note " "that this is currently only checked during image conversion (if enabled), " "and limits the types of VMDK images we will convert from." msgstr "" "A list of strings describing allowed VMDK 'create-type' subformats that will " "be allowed. This is recommended to only include single-file-with-sparse-" "header variants to avoid potential host file exposure due to processing " "named extents. If this list is empty, then no VDMK image types allowed. Note " "that this is currently only checked during image conversion (if enabled), " "and limits the types of VMDK images we will convert from." 
#, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s. (Please note that metadata tag names are case " "insensitive)." msgstr "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s. (Please note that metadata tag names are case " "insensitive)." msgid "A set of URLs to access the image file kept in external store" msgstr "A set of URLs to access the image file kept in external store" #, python-format msgid "" "After upload to backend, deletion of staged image data from %(fn)s has " "failed because %(em)s" msgstr "" "After upload to backend, deletion of staged image data from %(fn)s has " "failed because %(em)s" #, python-format msgid "" "After upload to backend, deletion of staged image data from %(fn)s has " "failed because [Errno %(en)d]" msgstr "" "After upload to backend, deletion of staged image data from %(fn)s has " "failed because [Errno %(en)d]" #, python-format msgid "" "After upload to backend, deletion of staged image data has failed because it " "cannot be found at %(fn)s" msgstr "" "After upload to backend, deletion of staged image data has failed because it " "cannot be found at %(fn)s" #, python-format msgid "After upload to the backend, deleting staged image data from %(fn)s" msgstr "After upload to the backend, deleting staged image data from %(fn)s" msgid "Algorithm to calculate the os_hash_value" msgstr "Algorithm to calculate the os_hash_value" msgid "" "All_stores parameter can't be used with x-image-meta-store header or stores " "parameter" msgstr "" "All_stores parameter can't be used with x-image-meta-store header or stores " "parameter" #, python-format msgid "All_stores_must_succeed can only be set with enabled_backends %s" msgstr "All_stores_must_succeed can only be set with enabled_backends %s" msgid "Amount of disk space (in GB) required to boot image." msgstr "Amount of disk space (in GB) required to boot image." msgid "Amount of ram (in MB) required to boot image." msgstr "Amount of ram (in MB) required to boot image." msgid "An identifier for the image" msgstr "An identifier for the image" msgid "An identifier for the image member (tenantId)" msgstr "An identifier for the image member (tenantId)" msgid "An identifier for the owner of this task" msgstr "An identifier for the owner of this task" msgid "An identifier for the task" msgstr "An identifier for the task" msgid "An image file url" msgstr "An image file URL" msgid "An image schema url" msgstr "An image schema URL" msgid "An image self url" msgstr "An image self URL" msgid "An import task exception occurred" msgstr "An import task exception occurred" msgid "An object with the same identifier already exists." msgstr "An object with the same identifier already exists." 
msgid "An object with the same identifier is currently being operated on." msgstr "An object with the same identifier is currently being operated on." msgid "An object with the specified identifier was not found." msgstr "An object with the specified identifier was not found." msgid "An unknown exception occurred" msgstr "An unknown exception occurred" msgid "An unknown task exception occurred" msgstr "An unknown task exception occurred" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Attribute '%(property)s' is read-only." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "Attribute '%(property)s' is reserved." #, python-format msgid "Attribute '%s' is read-only." msgstr "Attribute '%s' is read-only." #, python-format msgid "Attribute '%s' is reserved." msgstr "Attribute '%s' is reserved." msgid "Attribute container_format can be only replaced for a queued image." msgstr "Attribute container_format can be only replaced for a queued image." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "Attribute disk_format can be only replaced for a queued image." msgid "" "Auth key for the user authenticating against the Swift authentication " "service." msgstr "" "Auth key for the user authenticating against the Swift authentication " "service." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Auth service at URL %(url)s not found." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgid "Authorization failed." msgstr "Authorisation failed." msgid "Available categories:" msgstr "Available categories:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." #, python-format msgid "Bad header: %(header_name)s" msgstr "Bad header: %(header_name)s" msgid "Body expected in request." msgstr "Body expected in request." msgid "" "CONF.workers should be set to 0 or 1 when using the db.simple.api backend. " "Fore more info, see https://bugs.launchpad.net/glance/+bug/1619508" msgstr "" "CONF.workers should be set to 0 or 1 when using the db.simple.api backend. " "Fore more info, see https://bugs.launchpad.net/glance/+bug/1619508" #, python-format msgid "Cache entry for %s for %s already exists." msgstr "Cache entry for %s for %s already exists." msgid "Caching via API is not supported at this site." msgstr "Caching via API is not supported at this site." msgid "Cannot be a negative value" msgstr "Cannot be a negative value" msgid "Cannot be a negative value." msgstr "Cannot be a negative value." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "Cannot convert image %(key)s '%(value)s' to an integer." msgid "" "Cannot delete image data from the only store containing it. Consider " "deleting the image instead." msgstr "" "Cannot delete image data from the only store containing it. Consider " "deleting the image instead." #, python-format msgid "Cannot delete staged image data %(fn)s [Errno %(en)d]" msgstr "Cannot delete staged image data %(fn)s [Errno %(en)d]" msgid "Cannot remove last location in the image." msgstr "Cannot remove last location in the image." 
#, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Cannot save data for image %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Cannot set locations to empty list." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "Checksum verification failed. Aborted caching of image '%s'." #, python-format msgid "Conflicting values for %s" msgstr "Conflicting values for %s" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "Connect error/bad request to Auth service at URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "Constructed URL: %s" #, python-format msgid "Copied %i MiB" msgstr "Copied %i MiB" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Corrupt image download for image %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgid "Could not find OVF file in OVA archive file." msgstr "Could not find OVF file in OVA archive file." #, python-format msgid "Could not find metadata object %s" msgstr "Could not find metadata object %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Could not find metadata tag %s" #, python-format msgid "Could not find property %s" msgstr "Could not find property %s" #, python-format msgid "Could not find task %s" msgstr "Could not find task %s" #, python-format msgid "Could not update image: %s" msgstr "Could not update image: %s" #, python-format msgid "Couldn't create metadata namespace: %s" msgstr "Couldn't create metadata namespace: %s" #, python-format msgid "Couldn't create metadata object: %s" msgstr "Couldn't create metadata object: %s" #, python-format msgid "Couldn't create metadata property: %s" msgstr "Couldn't create metadata property: %s" #, python-format msgid "Couldn't create metadata tag: %s" msgstr "Couldn't create metadata tag: %s" #, python-format msgid "Couldn't update metadata namespace: %s" msgstr "Couldn't update metadata namespace: %s" #, python-format msgid "Couldn't update metadata object: %s" msgstr "Couldn't update metadata object: %s" #, python-format msgid "Couldn't update metadata property: %s" msgstr "Couldn't update metadata property: %s" #, python-format msgid "Couldn't update metadata tag: %s" msgstr "Couldn't update metadata tag: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "Currently, OVA packages containing multiple disk are not supported." msgid "Custom property should not be greater than 255 characters." msgstr "Custom property should not be greater than 255 characters." msgid "" "Data migration did not run. Data migration cannot be run before database " "expansion. Run database expansion first using \"glance-manage db expand\"" msgstr "" "Data migration did not run. Data migration cannot be run before database " "expansion. Run database expansion first using \"glance-manage db expand\"" msgid "Data supplied was not valid." msgstr "Data supplied was not valid." msgid "" "Database contraction did not run. Database contraction cannot be run before " "data migration is complete. Run data migration using \"glance-manage db " "migrate\"." msgstr "" "Database contraction did not run. Database contraction cannot be run before " "data migration is complete. Run data migration using \"glance-manage db " "migrate\"." msgid "" "Database contraction did not run. 
Database contraction cannot be run before " "database expansion. Run database expansion first using \"glance-manage db " "expand\"" msgstr "" "Database contraction did not run. Database contraction cannot be run before " "database expansion. Run database expansion first using \"glance-manage db " "expand\"" msgid "" "Database contraction failed. Couldn't find head revision of contract branch." msgstr "" "Database contraction failed. Couldn't find head revision of contract branch." #, python-format msgid "" "Database contraction failed. Database contraction should have brought the " "database version up to \"%(e_rev)s\" revision. But, current revisions are: " "%(curr_revs)s " msgstr "" "Database contraction failed. Database contraction should have brought the " "database version up to \"%(e_rev)s\" revision. But, current revisions are: " "%(curr_revs)s " msgid "" "Database expansion failed. Couldn't find head revision of expand branch." msgstr "" "Database expansion failed. Couldn't find head revision of expand branch." #, python-format msgid "" "Database expansion failed. Database expansion should have brought the " "database version up to \"%(e_rev)s\" revision. But, current revisions are: " "%(curr_revs)s " msgstr "" "Database expansion failed. Database expansion should have brought the " "database version up to \"%(e_rev)s\" revision. But, current revisions are: " "%(curr_revs)s " msgid "Database expansion is up to date. No expansion needed." msgstr "Database expansion is up to date. No expansion needed." msgid "" "Database is either not under migration control or under legacy migration " "control, please run \"glance-manage db sync\" to place the database under " "alembic migration control." msgstr "" "Database is either not under migration control or under legacy migration " "control, please run \"glance-manage db sync\" to place the database under " "alembic migration control." msgid "Database is synced successfully." msgstr "Database is synced successfully." msgid "Database is up to date. No migrations needed." msgstr "Database is up to date. No migrations needed." msgid "Database is up to date. No upgrades needed." msgstr "Database is up to date. No upgrades needed." msgid "Database migration is up to date. No migration needed." msgstr "Database migration is up to date. No migration needed." 
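# NOTE: the expand/migrate/contract messages above describe Glance's rolling
# upgrade workflow. The commands below are quoted from the messages themselves
# and must be run in this order:
#
#   glance-manage db expand    # add new schema elements first
#   glance-manage db migrate   # then migrate the data
#   glance-manage db contract  # finally drop the old schema elements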
msgid "Date and time of image member creation" msgstr "Date and time of image member creation" msgid "Date and time of image registration" msgstr "Date and time of image registration" msgid "Date and time of last modification of image member" msgstr "Date and time of last modification of image member" msgid "Date and time of namespace creation" msgstr "Date and time of namespace creation" msgid "Date and time of object creation" msgstr "Date and time of object creation" msgid "Date and time of resource type association" msgstr "Date and time of resource type association" msgid "Date and time of tag creation" msgstr "Date and time of tag creation" msgid "Date and time of the last image modification" msgstr "Date and time of the last image modification" msgid "Date and time of the last namespace modification" msgstr "Date and time of the last namespace modification" msgid "Date and time of the last object modification" msgstr "Date and time of the last object modification" msgid "Date and time of the last resource type association modification" msgstr "Date and time of the last resource type association modification" msgid "Date and time of the last tag modification" msgstr "Date and time of the last tag modification" msgid "Datetime when this resource was created" msgstr "Datetime when this resource was created" msgid "Datetime when this resource was updated" msgstr "Datetime when this resource was updated" msgid "Datetime when this resource would be subject to removal" msgstr "Datetime when this resource would be subject to removal" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "Denying attempt to upload image because it exceeds the quota: %s" msgid "Descriptive name for the image" msgstr "Descriptive name for the image" msgid "" "Did not receive a pipe handle, which is used when communicating with the " "parent process." msgstr "" "Did not receive a pipe handle, which is used when communicating with the " "parent process." #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" #, python-format msgid "Error deleting from store %(store)s when reverting." msgstr "Error deleting from store %(store)s when reverting." msgid "Error deleting from store foo when reverting." msgstr "Error deleting from store foo when reverting." #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Error fetching members of image %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "Error in store configuration. Adding images to store is disabled." 
#, python-format msgid "Error: %(exc_type)s: %(e)s" msgstr "Error: %(exc_type)s: %(e)s" msgid "Error: : testing" msgstr "Error: : testing" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Expected a member in the form: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Expected a status in the form: {\"status\": \"status\"}" #, python-format msgid "" "Failed to calculate checksum of %(image_id)s as image data has been deleted " "from the backend" msgstr "" "Failed to calculate checksum of %(image_id)s as image data has been deleted " "from the backend" #, python-format msgid "Failed to find image %(image_id)s" msgstr "Failed to find image %(image_id)s" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Failed to find image %(image_id)s to delete" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Failed to find resource type %(resourcetype)s to delete" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "Failed to initialise the image cache database. Got error: %s" #, python-format msgid "Failed to read %s from config" msgstr "Failed to read %s from config" #, python-format msgid "Failed to sync database: ERROR: %s" msgstr "Failed to sync database: ERROR: %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "File %(path)s has invalid backing file %(bfile)s, aborting." #, python-format msgid "File %(path)s has invalid data-file %(dfile)s, aborting." msgstr "File %(path)s has invalid data-file %(dfile)s, aborting." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "File based imports are not allowed. Please use a non-local source of image " "data." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "Forbidding request, metadata definition namespace=%s is not visible." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Forbidding request, task %s is not visible" msgid "Format of the container" msgstr "Format of the container" msgid "Format of the disk" msgstr "Format of the disk" #, python-format msgid "Hash calculation failed for image %s data" msgstr "Hash calculation failed for image %s data" msgid "" "Hexdigest of the image contents using the algorithm specified by the " "os_hash_algo" msgstr "" "Hexdigest of the image contents using the algorithm specified by the " "os_hash_algo" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" is not valid." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host and port \"%s\" is not valid." 
msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgid "Human-readable informative request-id" msgstr "Human-readable informative request-id" msgid "" "If provided 'x-image-cache-clear-target' must be 'cache', 'queue' or empty " "string." msgstr "" "If provided 'x-image-cache-clear-target' must be 'cache', 'queue' or empty " "string." msgid "If true, image will not appear in default image list response." msgstr "If true, image will not appear in default image list response." msgid "If true, image will not be deletable." msgstr "If true, image will not be deletable." msgid "If true, namespace will not be deletable." msgstr "If true, namespace will not be deletable." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "Image %(id)s could not be deleted because it is in use: %(exc)s" #, python-format msgid "Image %(iid)s is not stored in store %(sid)s." msgstr "Image %(iid)s is not stored in store %(sid)s." #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "Image %(image_id)s is protected and cannot be deleted." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." #, python-format msgid "Image %s not found." msgstr "Image %s not found." msgid "Image associated with the task" msgstr "Image associated with the task" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "Image exceeds the storage quota: %s" msgid "Image id is required." msgstr "Image id is required." 
msgid "Image is a VMDK, but no VMDK createType is specified" msgstr "Image is a VMDK, but no VMDK createType is specified" #, python-format msgid "Image is already present at store '%s'" msgstr "Image is already present at store '%s'" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "Image member limit exceeded for image %(id)s: %(e)s:" #, python-format msgid "Image needs to be in 'queued' state to use '%s' method" msgstr "Image needs to be in 'queued' state to use '%s' method" msgid "Image needs to be staged before 'glance-direct' method can be used" msgstr "Image needs to be staged before 'glance-direct' method can be used" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" #, python-format msgid "Image storage media is full: %s" msgstr "Image storage media is full: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Image tag limit exceeded for image %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Image upload problem: %s" msgid "Image with status active cannot be target for import" msgstr "Image with status active cannot be target for import" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Image with the given id %(image_id)s was not found" msgid "Import request requires a 'method' field." msgstr "Import request requires a 'method' field." msgid "Import request requires a 'name' field." msgstr "Import request requires a 'name' field." #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Incorrect request: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Input does not contain '%(key)s' field" msgid "Input to api_image_import task is empty." msgstr "Input to api_image_import task is empty." msgid "Input to location_import task is empty." msgstr "Input to location_import task is empty." #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Insufficient permissions on image storage media: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Invalid JSON pointer for this resource: '/%s'" msgid "Invalid VMDK create-type specified" msgstr "Invalid VMDK create-type specified" msgid "Invalid configuration in glance-swift conf file." msgstr "Invalid configuration in glance-swift conf file." msgid "Invalid configuration in property protection file." msgstr "Invalid configuration in property protection file." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Invalid content type %(content_type)s" #, python-format msgid "" "Invalid data migration script '%(script)s'. A valid data migration script " "must implement functions 'has_migrations' and 'migrate'." msgstr "" "Invalid data migration script '%(script)s'. A valid data migration script " "must implement functions 'has_migrations' and 'migrate'." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Invalid filter value %s. The quote is not closed." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Invalid filter value %s. There is no comma after closing quotation mark." 
#, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Invalid filter value %s. There is no comma before opening quotation mark." #, python-format msgid "Invalid int value for age_in_days: %(age_in_days)s" msgstr "Invalid int value for age_in_days: %(age_in_days)s" #, python-format msgid "Invalid int value for max_rows: %(max_rows)s" msgstr "Invalid int value for max_rows: %(max_rows)s" msgid "Invalid location" msgstr "Invalid location" #, python-format msgid "Invalid location: %s" msgstr "Invalid location: %s" msgid "Invalid locations" msgstr "Invalid locations" #, python-format msgid "Invalid locations: %s" msgstr "Invalid locations: %s" msgid "Invalid marker format" msgstr "Invalid marker format" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgid "Invalid position for adding a location." msgstr "Invalid position for adding a location." msgid "Invalid position for removing a location." msgstr "Invalid position for removing a location." msgid "Invalid service catalog json." msgstr "Invalid service catalogue json." #, python-format msgid "Invalid sort direction: %s" msgstr "Invalid sort direction: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Invalid status value: %s" #, python-format msgid "Invalid status: %s" msgstr "Invalid status: %s" #, python-format msgid "Invalid type value: %s" msgstr "Invalid type value: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" #, python-format msgid "" "Invalid value '%s' for 'os_hidden' filter. Valid values are 'true' or " "'false'." msgstr "" "Invalid value '%s' for 'os_hidden' filter. Valid values are 'true' or " "'false'." #, python-format msgid "" "Invalid value '%s' for 'protected' filter. Valid values are 'true' or " "'false'." msgstr "" "Invalid value '%s' for 'protected' filter. Valid values are 'true' or " "'false'." 
#, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Invalid value for option %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Invalid visibility value: %s" #, python-format msgid "It's not allowed to add locations if image status is %s." msgstr "It's not allowed to add locations if image status is %s." msgid "It's not allowed to add locations if locations are invisible." msgstr "It's not allowed to add locations if locations are invisible." msgid "" "It's not allowed to remove image data from store if image status is not " "'active'" msgstr "" "It's not allowed to remove image data from store if image status is not " "'active'" #, python-format msgid "It's not allowed to remove locations if image status is %s." msgstr "It's not allowed to remove locations if image status is %s." msgid "It's not allowed to remove locations if locations are invisible." msgstr "It's not allowed to remove locations if locations are invisible." #, python-format msgid "It's not allowed to replace locations if image status is %s." msgstr "It's not allowed to replace locations if image status is %s." msgid "It's not allowed to update locations if locations are invisible." msgstr "It's not allowed to update locations if locations are invisible." msgid "" "Key:Value pair of store identifier and store type. In case of multiple " "backends should be separated using comma." msgstr "" "Key:Value pair of store identifier and store type. In case of multiple " "backends should be separated using comma." msgid "List of strings related to the image" msgstr "List of strings related to the image" msgid "Malformed JSON in request body." msgstr "Malformed JSON in request body." msgid "Maximal age is count of days since epoch." msgstr "Maximal age is count of days since epoch." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Maximum redirects (%(redirects)s) was exceeded." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Member %(member_id)s is duplicated for image %(image_id)s" msgid "Member can't be empty" msgstr "Member can't be empty" msgid "Member to be added not specified" msgstr "Member to be added not specified" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Metadata definition namespace not found for id=%s" #, python-format msgid "Metadata definition namespace=%(namespace_name)s was not found." msgstr "Metadata definition namespace=%(namespace_name)s was not found." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "Metadata definition object not found for id=%s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." 
#, python-format msgid "Metadata definition property not found for id=%s" msgstr "Metadata definition property not found for id=%s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Metadata definition tag not found for id=%s" #, python-format msgid "Migrated %s rows" msgstr "Migrated %s rows" msgid "Minimal rows limit is -1." msgstr "Minimal rows limit is -1." msgid "Missing required 'image_id' field" msgstr "Missing required 'image_id' field" #, python-format msgid "Missing required credential: %(required)s" msgstr "Missing required credential: %(required)s" msgid "Multi backend is not supported at this site." msgstr "Multi backend is not supported at this site." #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgid "Must supply a non-negative value for age." msgstr "Must supply a non-negative value for age." #, python-format msgid "Namespace %s not found" msgstr "Namespace %s not found" #, python-format msgid "" "New operation on image '%s' is not permitted as prior operation is still in " "progress" msgstr "" "New operation on image '%s' is not permitted as prior operation is still in " "progress" #, python-format msgid "New value(s) for %s may only be provided when image status is 'queued'" msgstr "New value(s) for %s may only be provided when image status is 'queued'" #, python-format msgid "No image found with ID %s" msgstr "No image found with ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "No location found with ID %(loc)s from image %(img)s" msgid "None of the uploads finished!" msgstr "None of the uploads finished!" #, python-format msgid "Not allowed to create members for image %s." msgstr "Not allowed to create members for image %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Not allowed to deactivate image in status '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Not allowed to delete members for image %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Not allowed to delete tags for image %s." #, python-format msgid "Not allowed to list members for image %(image_id)s: %(inner_msg)s" msgstr "Not allowed to list members for image %(image_id)s: %(inner_msg)s" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Not allowed to reactivate image in status '%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "Not allowed to update members for image %s." #, python-format msgid "Not allowed to update tags for image %s." 
msgstr "Not allowed to update tags for image %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "Not allowed to upload image data for image %(image_id)s: %(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "Number of sort dirs does not match the number of sort keys" msgid "OVA extract is limited to admin" msgstr "OVA extract is limited to admin" msgid "Old and new sorting syntax cannot be combined" msgstr "Old and new sorting syntax cannot be combined" msgid "Only images with status active can be targeted for copying" msgstr "Only images with status active can be targeted for copying" msgid "Only images with status active can be targeted for queueing" msgstr "Only images with status active can be targeted for queueing" msgid "Only shared images have members." msgstr "Only shared images have members." #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "Operation \"%s\" requires a member named \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgid "Operations must be JSON objects." msgstr "Operations must be JSON objects." #, python-format msgid "Original locations is not empty: %s" msgstr "Original locations is not empty: %s" msgid "Owner can't be updated by non admin." msgstr "Owner can't be updated by non admin." msgid "Owner of the image" msgstr "Owner of the image" msgid "Owner of the namespace." msgstr "Owner of the namespace." msgid "Param values can't contain 4 byte unicode." msgstr "Param values can't contain 4 byte Unicode." msgid "Placed database under migration control at revision:" msgstr "Placed database under migration control at revision:" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "Pointer `%s` contains \"~\" not part of a recognised escape sequence." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Pointer `%s` contains adjacent \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Pointer `%s` does not contains valid token." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Pointer `%s` does not start with \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Pointer `%s` end with \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "Port \"%s\" is not valid." #, python-format msgid "Process %d not running" msgstr "Process %d not running" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "Properties %s must be set prior to saving data." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Property %s already present." #, python-format msgid "Property %s does not exist." msgstr "Property %s does not exist." #, python-format msgid "Property %s may not be removed." msgstr "Property %s may not be removed." 
#, python-format msgid "Property %s must be set prior to saving data." msgstr "Property %s must be set prior to saving data." msgid "Property names can't contain 4 byte unicode." msgstr "Property names can't contain 4 byte Unicode." #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Provided object does not match schema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Provided status of task is unsupported: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Provided type of task is unsupported: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Provides a user friendly description of the namespace." msgid "Purge command failed, check glance-manage logs for more details." msgstr "Purge command failed, check glance-manage logs for more details." msgid "Received invalid HTTP redirect." msgstr "Received invalid HTTP redirect." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirecting to %(uri)s for authorisation." #, python-format msgid "Referenced %s is not cached on %s." msgstr "Referenced %s is not cached on %s." msgid "Refusing to process VMDK file as vmdk_allowed_types is empty" msgstr "Refusing to process VMDK file as vmdk_allowed_types is empty" #, python-format msgid "" "Refusing to process VMDK file with create-type of %r which is not in allowed " "set of: %s" msgstr "" "Refusing to process VMDK file with create-type of %r which is not in allowed " "set of: %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Registry was not configured correctly on API server. Reason: %(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Reload of %(serv)s not supported" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" #, python-format msgid "Remote image id does not look like a UUID: %s" msgstr "Remote image id does not look like a UUID: %s" #, python-format msgid "Removing stale pid file %s" msgstr "Removing stale pid file %s" msgid "Request body must be a JSON array of operation objects." msgstr "Request body must be a JSON array of operation objects." msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: https://docs.openstack.org/heat/latest/template_guide/openstack." "html" msgstr "" "Resource type names should be aligned with Heat resource types whenever " "possible: https://docs.openstack.org/heat/latest/template_guide/openstack." "html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Response from Keystone does not contain a Glance endpoint." msgid "Rolling upgrades are currently supported only for MySQL and Sqlite" msgstr "Rolling upgrades are currently supported only for MySQL and Sqlite" msgid "Scope of image accessibility" msgstr "Scope of image accessibility" msgid "Scope of namespace accessibility." msgstr "Scope of namespace accessibility." msgid "Scrubber encountered an error while trying to fetch scrub jobs." msgstr "Scrubber encountered an error while trying to fetch scrub jobs." #, python-format msgid "Server %(serv)s is stopped" msgstr "Server %(serv)s is stopped" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Server worker creation failed: %(reason)s." 
msgid "Signature verification failed" msgstr "Signature verification failed" #, python-format msgid "Size attribute of remote image %s could not be determined." msgstr "Size attribute of remote image %s could not be determined." msgid "Size of image file in bytes" msgstr "Size of image file in bytes" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgid "Sort direction supplied was not valid." msgstr "Sort direction supplied was not valid." msgid "Sort key supplied was not valid." msgstr "Sort key supplied was not valid." #, python-format msgid "Source image status should be active instead of %s" msgstr "Source image status should be active instead of %s" msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgid "Specifying both 'visibility' and 'is_public' is not permiitted." msgstr "Specifying both 'visibility' and 'is_public' is not permitted." #, python-format msgid "Staged image data not found at %(fn)s" msgstr "Staged image data not found at %(fn)s" msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Status must be \"pending\", \"accepted\" or \"rejected\"." msgid "Status not specified" msgstr "Status not specified" msgid "Status of the image" msgstr "Status of the image" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "Status transition from %(cur_status)s to %(new_status)s is not allowed" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" #, python-format msgid "Store %s is not available on this node, skipping `_set_acls` call." msgstr "Store %s is not available on this node, skipping `_set_acls` call." msgid "" "Store in which image data resides. Only present when the operator has " "enabled multiple stores. May be a comma-separated list of store identifiers." msgstr "" "Store in which image data resides. Only present when the operator has " "enabled multiple stores. May be a comma-separated list of store identifiers." msgid "Stores parameter and x-image-meta-store header can't be both specified" msgstr "Stores parameter and x-image-meta-store header can't be both specified" msgid "Supported values for the 'container_format' image attribute" msgstr "Supported values for the 'container_format' image attribute" msgid "Supported values for the 'disk_format' image attribute" msgstr "Supported values for the 'disk_format' image attribute" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." 
msgstr "Suppressed re-spawn as %(serv)s was %(rsn)s." msgid "System SIGHUP signal received." msgstr "System SIGHUP signal received." #, python-format msgid "Task '%s' is required" msgstr "Task '%s' is required" msgid "Task does not exist" msgstr "Task does not exist" msgid "Task failed due to Internal Error" msgstr "Task failed due to Internal Error" msgid "Task was aborted externally" msgstr "Task was aborted externally" msgid "Task was not configured properly" msgstr "Task was not configured properly" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Task with the given id %(task_id)s was not found" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "The \"changes-since\" filter is no longer available on v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "The CA file you specified %s does not exist" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgid "The Store URI was malformed." msgstr "The Store URI was malformed." msgid "The URL of the new location to be added in the image." msgstr "The URL of the new location to be added in the image." msgid "The address where the Swift authentication service is listening." msgstr "The address where the Swift authentication service is listening." #, python-format msgid "The cert file you specified %s does not exist" msgstr "The cert file you specified %s does not exist" msgid "The current status of this task" msgstr "The current status of this task" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "The given URI is not valid. Please specify a valid URI from the following " "list of supported URI %(supported)s" #, python-format msgid "The image %s has data on staging" msgstr "The image %s has data on staging" #, python-format msgid "" "The image %s is already present on the target, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the target server." msgstr "" "The image %s is already present on the target, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the target server." #, python-format msgid "The incoming image is too large: %s" msgstr "The incoming image is too large: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "The key file you specified %s does not exist" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "The limit has been exceeded on the number of allowed image locations. 
" "Attempted: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "The location %(location)s already exists" #, python-format msgid "The location data has an invalid ID: %d" msgstr "The location data has an invalid ID: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "The metadata definition namespace=%(namespace_name)s already exists." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." #, python-format msgid "The node reference %s was not found." msgstr "The node reference %s was not found." msgid "The parameters required by task, JSON blob" msgstr "The parameters required by task, JSON blob" msgid "The provided image is too large." msgstr "The provided image is too large." msgid "The request returned 500 Internal Server Error." 
msgstr "The request returned 500 Internal Server Error." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "The requested image has been deactivated. Image data download is forbidden." msgid "The result of current task, JSON blob" msgstr "The result of current task, JSON blob" #, python-format msgid "The selected store %s is not available on this node." msgstr "The selected store %s is not available on this node." #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." #, python-format msgid "The specified member %s could not be found" msgstr "The specified member %s could not be found" #, python-format msgid "The specified metadata object %s could not be found" msgstr "The specified metadata object %s could not be found" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "The specified metadata tag %s could not be found" #, python-format msgid "The specified namespace %s could not be found" msgstr "The specified namespace %s could not be found" #, python-format msgid "The specified property %s could not be found" msgstr "The specified property %s could not be found" #, python-format msgid "The specified resource type %s could not be found " msgstr "The specified resource type %s could not be found " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgid "The status of this image member" msgstr "The status of this image member" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "The target member %(member_id)s is already associated with image " "%(image_id)s." 
msgid "The type of task represented by this content" msgstr "The type of task represented by this content" msgid "The unique namespace text." msgstr "The unique namespace text." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "The user friendly name for the namespace. Used by UI if available." msgid "The user to authenticate against the Swift authentication service." msgstr "The user to authenticate against the Swift authentication service." msgid "There was an error configuring the client." msgstr "There was an error configuring the client." msgid "There was an error connecting to a server" msgstr "There was an error connecting to a server" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Time in hours for which a task lives after, either succeeding or failing" msgid "Too few arguments." msgstr "Too few arguments." #, python-format msgid "" "Total size is %(size)d bytes (%(human_size)s) across %(img_count)d images" msgstr "" "Total size is %(size)d bytes (%(human_size)s) across %(img_count)d images" #, python-format msgid "URI for web-download does not pass filtering: %s" msgstr "URI for web-download does not pass filtering: %s" msgid "URL to access the image file kept in external store" msgstr "URL to access the image file kept in external store" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgid "Unable to determine VMDK create-type" msgstr "Unable to determine VMDK create-type" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Unable to filter by unknown operator '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "Unable to filter on a range with a non-numeric value." msgid "Unable to filter on a unknown operator." msgstr "Unable to filter on a unknown operator." msgid "Unable to filter using the specified operator." msgstr "Unable to filter using the specified operator." msgid "Unable to filter using the specified range." msgstr "Unable to filter using the specified range." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Unable to find '%s' in JSON Schema change" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "Unable to increase file descriptor limit. Running as non-root?" 
#, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Unable to load schema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Unable to locate paste config file for %s." #, python-format msgid "Unable to upload duplicate image data for image %(image_id)s: %(error)s" msgstr "" "Unable to upload duplicate image data for image %(image_id)s: %(error)s" msgid "Unexpected body type. Expected list/dict." msgstr "Unexpected body type. Expected list/dict." #, python-format msgid "Unexpected exception when deleting from store %(store)s." msgstr "Unexpected exception when deleting from store %(store)s." msgid "Unexpected exception when deleting from store foo." msgstr "Unexpected exception when deleting from store foo." #, python-format msgid "Unexpected response: %s" msgstr "Unexpected response: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Unknown auth strategy '%s'" #, python-format msgid "Unknown command: %s" msgstr "Unknown command: %s" #, python-format msgid "Unknown import method name '%s'." msgstr "Unknown import method name '%s'." msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Unknown sort direction, must be 'desc' or 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Unrecognised JSON Schema draft version" #, python-format msgid "Upgraded database to: %(v)s, current revision(s): %(r)s" msgstr "Upgraded database to: %(v)s, current revision(s): %(r)s" msgid "Upgraded database, current revision(s):" msgstr "Upgraded database, current revision(s):" #, python-format msgid "Uploading the image failed due to: %(exc)s" msgstr "Uploading the image failed due to: %(exc)s" msgid "" "Use of this option, deprecated since Newton, is a security risk and will be " "removed once we figure out a way to satisfy those use cases that currently " "require it. An earlier announcement that the same functionality can be " "achieved with greater granularity by using policies is incorrect. You " "cannot work around this option via policy configuration at the present time, " "though that is the direction we believe the fix will take. Please keep an " "eye on the Glance release notes to stay up to date on progress in addressing " "this issue." msgstr "" "Use of this option, deprecated since Newton, is a security risk and will be " "removed once we figure out a way to satisfy those use cases that currently " "require it. An earlier announcement that the same functionality can be " "achieved with greater granularity by using policies is incorrect. You " "cannot work around this option via policy configuration at the present time, " "though that is the direction we believe the fix will take. Please keep an " "eye on the Glance release notes to stay up to date on progress in addressing " "this issue." msgid "User associated with the task" msgstr "User associated with the task" msgid "" "Values of location url, do_secure_hash and validation_data for new add " "location API" msgstr "" "Values of location URL, do_secure_hash and validation_data for new add " "location API" msgid "" "Values to be used to populate the corresponding image properties. If the " "image status is not 'queued', values must exactly match those already " "contained in the image properties." 
msgstr "" "Values to be used to populate the corresponding image properties. If the " "image status is not 'queued', values must exactly match those already " "contained in the image properties." msgid "" "Values to be used to populate the corresponding image properties." "do_secure_hash is not True then image checksum and hash will not be " "calculated so it is the responsibility of the consumer of location ADD API " "to provide the correct values in the validation_data parameter" msgstr "" "Values to be used to populate the corresponding image properties." "do_secure_hash is not True then image checksum and hash will not be " "calculated so it is the responsibility of the consumer of location ADD API " "to provide the correct values in the validation_data parameter" msgid "Virtual size of image in bytes" msgstr "Virtual size of image in bytes" msgid "" "Visibility must be one of \"community\", \"public\", \"private\", or \"shared" "\"" msgstr "" "Visibility must be one of \"community\", \"public\", \"private\", or \"shared" "\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgid "You are not authenticated." msgstr "You are not authenticated." #, python-format msgid "You are not authorized to complete %(action)s action." msgstr "You are not authorised to complete %(action)s action." msgid "You are not authorized to complete this action." msgstr "You are not authorised to complete this action." #, python-format msgid "You are not authorized to lookup image %s." msgstr "You are not authorised to lookup image %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "You are not authorised to lookup the members of the image %s." msgid "You are not permitted to create image members for the image." msgstr "You are not permitted to create image members for the image." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "You are not permitted to create images owned by '%s'." #, python-format msgid "You are not permitted to create images owned by '%s'owner" msgstr "You are not permitted to create images owned by '%s'owner" msgid "You are not permitted to modify 'status' on this image member." msgstr "You are not permitted to modify 'status' on this image member." msgid "You cannot delete image member." msgstr "You cannot delete image member." msgid "You do not own this image" msgstr "You do not own this image" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgid "" "Your database is not up to date. Your first step is to run `glance-manage db " "expand`." msgstr "" "Your database is not up to date. 
Your first step is to run `glance-manage db " "expand`." msgid "" "Your database is not up to date. Your next step is to run `glance-manage db " "contract`." msgstr "" "Your database is not up to date. Your next step is to run `glance-manage db " "contract`." msgid "" "Your database is not up to date. Your next step is to run `glance-manage db " "migrate`." msgstr "" "Your database is not up to date. Your next step is to run `glance-manage db " "migrate`." msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() got unexpected keyword argument '%s'" #, python-format msgid "" "cannot restore the image from %s to active (wanted from_state=pending_delete)" msgstr "" "cannot restore the image from %s to active (wanted from_state=pending_delete)" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" #, python-format msgid "checksum (%s) is not a valid hexadecimal value" msgstr "checksum (%s) is not a valid hexadecimal value" #, python-format msgid "checksum (%s) is not the correct size for md5 (should be 16 bytes)" msgstr "checksum (%s) is not the correct size for MD5 (should be 16 bytes)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "custom properties (%(props)s) conflict with base properties" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "Neither eventlet 'poll' nor 'selects' hubs are available on this platform" msgid "http store must be enabled to use location API by normal user." msgstr "The http store must be enabled for a normal user to use the location API." msgid "limit param must be an integer" msgstr "limit param must be an integer" msgid "limit param must be positive" msgstr "limit param must be positive" msgid "md5 hash of image contents." msgstr "MD5 hash of image contents." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() got unexpected keywords %s" #, python-format msgid "os_hash_algo must be %(want)s, not %(got)s" msgstr "os_hash_algo must be %(want)s, not %(got)s" #, python-format msgid "" "os_hash_value (%(value)s) is not the correct size for %(algo)s (should be " "%(want)d bytes)" msgstr "" "os_hash_value (%(value)s) is not the correct size for %(algo)s (should be " "%(want)d bytes)" #, python-format msgid "os_hash_value (%s) is not a valid hexadecimal value" msgstr "os_hash_value (%s) is not a valid hexadecimal value" #, python-format msgid "" "os_hash_value: (%(value)s) is not the correct size for (%(algo)s) (should be " "(%(want)d) bytes)" msgstr "" "os_hash_value: (%(value)s) is not the correct size for (%(algo)s) (should be " "(%(want)d) bytes)" #, python-format msgid "os_hash_value: (%s) not matched with actual os_hash_value: (%s)" msgstr "os_hash_value: (%s) does not match the actual os_hash_value: (%s)" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "unable to launch %(serv)s. 
Got error: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id is too long, max size %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/es/0000775000175000017500000000000000000000000016155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/es/LC_MESSAGES/0000775000175000017500000000000000000000000017742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/es/LC_MESSAGES/glance.po0000664000175000017500000014762200000000000021547 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Adriana Chisco Landazábal , 2015 # Alfredo Matas , 2015 # Marian Tort , 2015 # Pablo Sanchez , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:20+0000\n" "Last-Translator: Copied by Zanata \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Ocurrió excepción %(cls)s en la última llamada a rpc: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "" "No se ha encontrado %(m_id)s en la lista de miembros de la imagen %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "Se esta ejecutando %(serv)s (pid %(pid)s) ..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "Parece que %(serv)s ya se está ejecutando: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s de %(task_type)s no se ha configurado correctamente. No se pudo " "cargar el almacén de sistema de ficheo" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s de %(task_type)s no se ha configurado adecuadamente. Hace falta " "work dir: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)s ing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)s ing %(serv)s con %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Por favor especifique el par host:puerto, en donde el host es una " "dirección IPv4, IPv6, nombre de host o FQDN. Si utiliza una dirección IPv6 " "enciérrela entre paréntesis separados del puerto (por ejemplo \"[fe80::a:b:" "c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s no puede contener caracteres 4 byte unicode." 
#, python-format msgid "%s is already stopped" msgstr "%s ya se detuvo" #, python-format msgid "%s is stopped" msgstr "%s se ha detenido" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Se necesita la opción --os_auth_url ovariable de ambiente OS_AUTH_URL cuando " "la estrategia de autenticación keystone está habilitada\n" msgid "A body is not expected with this request." msgstr "No se espera un cuerpo en esta solicitud." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ya existe el objeto para definición de metadatos de nombre=%(object_name)s " "en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ya existe la propiedad para definición de metadatos de nombre=" "%(property_name)s en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Ya existe el tipo de recurso para definición de metadatos=" "%(resource_type_name)s" msgid "A set of URLs to access the image file kept in external store" msgstr "" "Conjunto de URLs para acceder al archivo de imagen se mantiene en un almacén " "externo" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Cantidad de espacio de disco (en GB) necesario para la imagen de arranque." msgid "Amount of ram (in MB) required to boot image." msgstr "Cantidad de RAM (en MB) necesario para la imagen de arranque." msgid "An identifier for the image" msgstr "Un identificador para la imagen" msgid "An identifier for the image member (tenantId)" msgstr "Un identificador para el miembro de la imagen (tenantId)" msgid "An identifier for the owner of this task" msgstr "Un identificador para el propietario de esta tarea" msgid "An identifier for the task" msgstr "Un identificador para la tarea" msgid "An image file url" msgstr "La URL de un archivo de imagen" msgid "An image schema url" msgstr "La URL de un esquema imagen" msgid "An image self url" msgstr "La URL propia de una imagen" msgid "An import task exception occurred" msgstr "Se ha producido una excepción en una tarea de importación" msgid "An object with the same identifier already exists." msgstr "Ya existe un objeto con el mismo identificador." msgid "An object with the same identifier is currently being operated on." msgstr "Ya se está operando un objeto con el mismo identificador." msgid "An object with the specified identifier was not found." msgstr "No se ha encontrado un objeto con el identificador especificado." msgid "An unknown exception occurred" msgstr "Se ha producido una excepción desconocida " msgid "An unknown task exception occurred" msgstr "Se ha producido una excepción desconocida " #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "El atributo '%(property)s' es de sólo lectura." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "El atributo '%(property)s' está reservado." #, python-format msgid "Attribute '%s' is read-only." msgstr "El atributo '%s' es de solo lectura." #, python-format msgid "Attribute '%s' is reserved." msgstr "El atributo '%s' está reservado." msgid "Attribute container_format can be only replaced for a queued image." 
msgstr "" "El atributo container_format solo se puede reemplazar por una imagen en cola." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "El atributo isk_format solo se puede remplazar con una imagen en cola." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "No se ha encontrado el servicio de autorización en el URL %(url)s." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Error de autenticación: es posible que el token haya caducado durante la " "carga de archivos. Borrando los datos de imagen de %s." msgid "Authorization failed." msgstr "Ha fallado la autorización." msgid "Available categories:" msgstr "Categorías disponibles:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Formato de filtro de consulta \"%s\" incorrecto. Utilice la notación de " "DateTime de la ISO 8601." #, python-format msgid "Bad header: %(header_name)s" msgstr "Cabecera incorrecta: %(header_name)s" msgid "Body expected in request." msgstr "Se esperaba un cuerpo en la solicitud." msgid "Cannot be a negative value" msgstr "No puede ser un valor negativo" msgid "Cannot be a negative value." msgstr "No puede ser un valor negativo." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "No se puede convertir imagen %(key)s '%(value)s' en un entero." msgid "Cannot remove last location in the image." msgstr "No se puede eliminar la última ubicación de la imagen." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "No se pueden guardar los datos para la imagen %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "No se puede definir ubicaciones como una lista vacía." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Se ha encontrado un error en la verificación de la suma de comprobación. Se " "ha abortado el almacenamiento en memoria caché de la imagen '%s'." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Solicitud incorrecta/error de conexión a servicio de autorización en el URL " "%(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL construido : %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Descarga de imagen corrupta para imagen %(image_id)s " #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "No se ha podido enlazar con %(host)s:%(port)s después de intentarlo durante " "30 segundos" msgid "Could not find OVF file in OVA archive file." msgstr "No se ha podido encontrar el archivo OVF en el archivo archivador OVA" #, python-format msgid "Could not find metadata object %s" msgstr "No se pudo encontrar el objeto de metadatos %s" #, python-format msgid "Could not find metadata tag %s" msgstr "No se pudo encontrar la etiqueta de metadatos %s" #, python-format msgid "Could not find property %s" msgstr "No se pudo encontrar propiedad %s" #, python-format msgid "Could not find task %s" msgstr "No se encontró tarea %s" #, python-format msgid "Could not update image: %s" msgstr "No se ha podido actualizar la imagen: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Actualmente no se da soporte a los paquetes OVA que contengan múltiples " "discos." msgid "Data supplied was not valid." 
msgstr "Los datos proporcionados no son válidos. " msgid "Date and time of image member creation" msgstr "Fecha y hora de creación del miembro de la imagen" msgid "Date and time of image registration" msgstr "Fecha y hora del registro de la imagen" msgid "Date and time of last modification of image member" msgstr "Fecha y hora de la última modificación del miembro de la imagen" msgid "Date and time of namespace creation" msgstr "Fecha y hora de creación del espacio de nombre" msgid "Date and time of object creation" msgstr "Fecha y hora de creación del objeto" msgid "Date and time of resource type association" msgstr "Fecha y hora de asociación del tipo de recurso" msgid "Date and time of tag creation" msgstr "Fecha y hora de creación de la etiqueta" msgid "Date and time of the last image modification" msgstr "Fecha y hora de la última modificación de la imagen" msgid "Date and time of the last namespace modification" msgstr "Fecha y hora de la última modificación de espacio de nombre" msgid "Date and time of the last object modification" msgstr "Fecha y hora de la última modificación del objeto" msgid "Date and time of the last resource type association modification" msgstr "" "Fecha y hora de la última modificación de la asociación del tipo de recurso" msgid "Date and time of the last tag modification" msgstr "Fecha y hora de la última modificación de la etiqueta" msgid "Datetime when this resource was created" msgstr "Fecha en la cual se creó este recurso" msgid "Datetime when this resource was updated" msgstr "Fecha en la cual se actualizó este recurso" msgid "Datetime when this resource would be subject to removal" msgstr "Fecha en la cual este recurso estará sujeto a eliminación" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "Denegando intento de carga de imagen porque excede la capacidad: %s" msgid "Descriptive name for the image" msgstr "Nombre descriptivo para la imagen" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "El controlador %(driver_name)s no se ha podido configurar correctamente. " "Razón: %(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Error al descodificar la solicitud. La URL o el cuerpo solicitado contenían " "caracteres que se han podido descodificar en Glance" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Error al captar los miembros de la imagen %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Error en la configuración del almacén. Se ha inhabilitado la adición de " "imágenes a almacen." msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Se eperaba un miembro con el formato: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Se eperaba un estado con el formato: {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "No se pudo encontrar imagen %(image_id)s para eliminar" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "No se encontró tipo de recurso %(resourcetype)s para eliminar" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "No se ha podido inicializar la base de datos de memoria caché de imagen. 
Se " "ha obtenido error: %s" #, python-format msgid "Failed to read %s from config" msgstr "No se ha podido leer %s de la configuración" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "No se permite cargar datos de imagen para imagen %(image_id)s a causa de un " "error HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Error al cargar datos de imagen para imagen %(image_id)s a causa de un error " "interno: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "El archivo %(path)s tiene un archivo de respaldo %(bfile)s no válido, " "terminando de forma anormal." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "No se permiten las importaciones basadas en ficheros. Por favor use una " "fuente no-local de datos de imagen." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Solicitud no permitida, el espacio de nombre para la definición de metadatos=" "%s no es visible" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Solicitud no permitida, la tarea %s no es visible" msgid "Format of the container" msgstr "Formato del contenedor" msgid "Format of the disk" msgstr "Formato del disco" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" no es válido." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host y puerto \"%s\" no es válido." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Solo se incluye mensaje informativo legible para humanos cuando sea " "apropiado (usualmente en error)" msgid "If true, image will not be deletable." msgstr "Si es true, la imagen no se podrá suprimir." msgid "If true, namespace will not be deletable." msgstr "Si es true, no se podrá eliminar el espacio de nombre." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "No se pudo eliminar imagen %(id)s porque está en uso: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "No se pudo encontrar imagen %(image_id)s después de subirla. Es posible que " "la imagen haya sido eliminada durante la carga: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "La imagen %(image_id)s está protegida y no se puede suprimir." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "No se pudo encontrar la imagen %s después de subirla. Es posible que la " "imagen haya sido eliminada durante la carga, limpiando los fragmentos " "cargados." #, python-format msgid "Image %s not found." msgstr "No se encontró imagen %s." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "La imagen excede la capacidad de almacenamiento: %s" msgid "Image id is required." 
msgstr "Se necesita id de imagen" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" "Se ha excedido el límite de miembro de imagen para imagen %(id)s: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "No se permite la transición de estado %(cur_status)s a %(new_status)s" #, python-format msgid "Image storage media is full: %s" msgstr "El soporte de almacenamiento de imagen está lleno: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "" "Se ha excedido el límite de etiqueta de imagen para imagen %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problema al cargar la imagen: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "No se ha podido encontrar la imagen con ID %(image_id)s" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Estrategia de autorización incorrecta, se esperaba \"%(expected)s\" pero se " "ha recibido \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Solicitud incorrecta: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "La entrada no contiene el campo '%(key)s'" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Permisos insuficientes en el soporte de almacenamiento de imagen: %s " #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Puntero JSON no válido para este recurso: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "Configuración en fichero en glance-swift no válida." msgid "Invalid configuration in property protection file." msgstr "Configuración en fichero de protección de propiedad no válida." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Tipo de contenido no válido %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valor de filtro no válido %s. No se han cerrado comillas." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valor de filtro no válido %s. No hay una coma antes de cerrar comillas." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "Valor de filtro no válido %s. No hay una coma antes de abrir comillas." msgid "Invalid location" msgstr "Ubicación no válida" #, python-format msgid "Invalid location: %s" msgstr "Ubicaciones no válidas: %s" msgid "Invalid locations" msgstr "Ubicaciones no válidas" #, python-format msgid "Invalid locations: %s" msgstr "Ubicaciones no válidas: %s" msgid "Invalid marker format" msgstr "Formato de marcador no válido" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operación: `%(op)s` no válida. Debe ser una de las siguientes: %(available)s." msgid "Invalid position for adding a location." msgstr "Posición no válida para agregar ubicación." msgid "Invalid position for removing a location." msgstr "Posición no válida para eliminar ubicación." msgid "Invalid service catalog json." msgstr "JSON de catálogo de servicios no válido." #, python-format msgid "Invalid sort direction: %s" msgstr "Dirección de ordenación no válida : %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." 
msgstr "" "Clave de ordenación no válida: %(sort_key)s. Debe ser una de las siguientes: " "%(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Valor de estado no válido: %s" #, python-format msgid "Invalid status: %s" msgstr "Estado no válido: %s" #, python-format msgid "Invalid type value: %s" msgstr "Valor de tipo no válido: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Actualización no válida. Como resultado será un espacio de nombre para la " "definición de metadatos duplicado con el mismo nombre de %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Actualización no válida. El resultado será un objeto para la definición de " "metadatos duplicado con el mismo nombre de=%(name)s en el espacio de nombre=" "%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Actualización no válida. El resultado será un objeto para la definición de " "metadatos duplicado con el mismo nombre de=%(name)s en el espacio de nombre=" "%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Actualización no válida. El resultado será una propiedad para la definición " "de metadatos duplicada con el mismo nombre de=%(name)s en espacio de nombre=" "%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Valor no válido'%(value)s' para parametro '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valor no válido para opción %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valor de visibilidad no válido : %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "No se permite añadir ubicaciones si son invisibles." msgid "It's not allowed to remove locations if locations are invisible." msgstr "No se permite eliminar ubicaciones si son invisibles." msgid "It's not allowed to update locations if locations are invisible." msgstr "No se permite actualizar las ubicaciones si son invisibles." msgid "List of strings related to the image" msgstr "Lista de series relacionadas con la imagen" msgid "Malformed JSON in request body." msgstr "JSON con formato incorrecto en el cuerpo de la solicitud." msgid "Maximal age is count of days since epoch." msgstr "La edad máxima es el recuento de días desde epoch." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Se ha superado el máximo de redirecciones (%(redirects)s)." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Se ha duplicado miembro %(member_id)s para imagen %(image_id)s" msgid "Member can't be empty" msgstr "Miembro no puede estar vacío" msgid "Member to be added not specified" msgstr "No se ha especificado el miembro que añadir" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "El espacio de nombre %(namespace)s de definición de metadatos está " "protegido y no puede eliminarse." 
#, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" "No se encontró espacio de nombre para la definición de metadatos para id=%s" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "El objeto %(object_name)s de definición de metadatos está protegido y no " "puede eliminarse." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "No se encontró el objeto para la definición de metadatos para id=%s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "La propiedad %(property_name)s de definición de metadatos está protegida y " "no puede eliminarse." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "No se encontró propiedad para la definición de metadatos para id=%s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "El tipo de recurso para la definición de metadatos %(resource_type_name)s es " "un tipo de sistema seeded y no puede eliminarse." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "La asociación de tipo de recurso %(resource_type)s de definición de " "metadatos está protegida y no puede eliminarse." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Etiqueta de definición de metadatos %(tag_name)s está protegida y no puede " "eliminarse." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "No se encontró etiqueta para la definición de metadatos para id=%s" #, python-format msgid "Missing required credential: %(required)s" msgstr "Falta la credencial necesaria :%(required)s " #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Varias coincidencias de servicio 'image' para la región %(region)s. Esto " "generalmente significa que es necesaria una región y que no se ha " "proporcionado ninguna." #, python-format msgid "No image found with ID %s" msgstr "No se encontró imagen con ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "No se encontró ubicación con ID %(loc)s de imagen %(img)s" #, python-format msgid "Not allowed to create members for image %s." msgstr "No se permite crear miembros para imagen %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "No está permitido eliminar imagen en estado '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "No se permite eliminar miembros para imagen %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "No se permite eliminar etiquetas para imagen %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "No está permitido reactivar imagen en estado'%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "No se permite actualizar miembros para imagen %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "No se permite actualizar etiquetas para imagen %s." 
#, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "No se permite cargar datos de imagen para imagen %(image_id)s: %(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "El número de dirs de ordenación no coincide con el número de claves de " "ordenación" msgid "OVA extract is limited to admin" msgstr "La extracción de OVA está limitada al administrador" msgid "Old and new sorting syntax cannot be combined" msgstr "No se puede combinar la antigua y nueva sintaxis de ordenación" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "La operación \"%s\" requiere un miembro llamado \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Los objetos de operación pueden contener exactamente un miembro llamado \"add" "\", \"remove\" o \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Los objetos de operación solo pueden contener un miembro llamado \"add\", " "\"remove\" o \"replace\"." msgid "Operations must be JSON objects." msgstr "Las operaciones deben ser objetos JSON." #, python-format msgid "Original locations is not empty: %s" msgstr "Las ubicaciones originales no están vacías: %s" msgid "Owner can't be updated by non admin." msgstr "Un usuario no admin no puede actualizar al propietario." msgid "Owner of the image" msgstr "Propietario de la imagen" msgid "Owner of the namespace." msgstr "Propietario del espacio de nombre." msgid "Param values can't contain 4 byte unicode." msgstr "Los valores de parámetro no pueden contener 4 byte unicode." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "El puntero `%s` contiene un \"~\" que no forma parte de una secuencia de " "escape reconocida." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "El puntero `%s` contiene adyacente \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "El puntero `%s` contiene un token no válido." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "El puntero `%s` no empieza por \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "El puntero `%s` termina en \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "Puerto \"%s\" no es válido." #, python-format msgid "Process %d not running" msgstr "No se está ejecutando proceso %d" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "Las propiedades %s deben definirse antes de guardar datos." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "La propiedad %(property_name)s no inicia con el prefijo de asociación del " "tipo de recurso esperado de '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "La propiedad %s ya está presente." #, python-format msgid "Property %s does not exist." msgstr "La propiedad %s no existe." #, python-format msgid "Property %s may not be removed." msgstr "La propiedad %s no se puede eliminar." #, python-format msgid "Property %s must be set prior to saving data." msgstr "La propiedad %s debe definirse antes de guardar datos." msgid "Property names can't contain 4 byte unicode." msgstr "Los nombre de propiedad no pueden contener 4 byte unicode." 
#, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "" "El objeto proporcionado no coincide con el esquema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "No se soporta el estado de tarea proporcionado: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "No se soporta el tipo de tarea proporcionado: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Proporciona una descripción sencilla del espacio de nombre." msgid "Received invalid HTTP redirect." msgstr "Se ha recibido redirección HTTP no válida. " #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirigiendo a %(uri)s para la autorización. " #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "El registro no se ha configurado correctamente en el servidor de API. Razón: " "%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "No se soporta la recarga de %(serv)s" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Recargando %(serv)s (pid %(pid)s) con señal (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Eliminando fichero de identificación positiva obsoleto %s" msgid "Request body must be a JSON array of operation objects." msgstr "" "El cuerpo de la solicitud debe ser una matriz JSON de objetos de operación." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "La respuesta de Keystone no contiene un punto final Glance." msgid "Scope of image accessibility" msgstr "Ámbito de accesibilidad de la imagen" msgid "Scope of namespace accessibility." msgstr "Alcance de accesibilidad del espacio de nombre." #, python-format msgid "Server %(serv)s is stopped" msgstr "El servidor %(serv)s se ha detenido" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "" "Se ha encontrado un error en la creación del trabajador de servidor: " "%(reason)s." msgid "Signature verification failed" msgstr "Ha fallado la verificación de firma" msgid "Size of image file in bytes" msgstr "Tamaño del archivo de imagen en bytes" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Algunos tipos de recurso aceptan más de una clave / par de valor por " "instancia. Por ejemplo, Cinder permite metadatos de usuario e imagen en " "volúmenes. Nova solo evalúa los metadatos de propiedades de imagen " "(planeadores y controladores). Esta propiedad permite un espacio de nombre " "para eliminar la ambigüedad." msgid "Sort direction supplied was not valid." msgstr "La dirección de ordenación proporcionada no es válida." msgid "Sort key supplied was not valid." msgstr "La clave de clasificación proporcionada no es válida. " msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Especifica el prefijo que se usará para el tipo de recurso dado. 
Cualquier " "propiedad en el espacio de nombre deben tener este prefijo cuando se aplica " "al tipo de recurso especificado. Debe incluir separador de prefijo(por " "ejemplo un punto :)." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "el estado debe ser \"pending\", \"accepted\" o \"rejected\"." msgid "Status not specified" msgstr "Estado no especificado" msgid "Status of the image" msgstr "Estado de la imaen" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "No se permite la transición de %(cur_status)s a %(new_status)s" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Deteniendo %(serv)s (pid %(pid)s) con señal (%(sig)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "Valores para el atributo de imagen 'container_format' soportados" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valores para el atributo de imagen 'disk_format' soportados" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Se suprimió respawn como %(serv)s era %(rsn)s." msgid "System SIGHUP signal received." msgstr "Se ha recibido señal de sistema SIGHUP." #, python-format msgid "Task '%s' is required" msgstr "Se necesita tarea '%s'" msgid "Task does not exist" msgstr "La tarea no existe" msgid "Task failed due to Internal Error" msgstr "La tarea ha fallado a causa de un Error Interno" msgid "Task was not configured properly" msgstr "La tarea no se configuró correctamente" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "No se encontró tarea con id %(task_id)s proporcionado" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "El filtro \"changes-since\" ya no está disponible en v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "El archivo CA %s que ha especificado no existe" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "El objeto de imagen %(image_id)s que la tarea %(task_id)s está creando, ya " "no tiene un estado válido para un próximo procesamiento. " msgid "The Store URI was malformed." msgstr "El URI del almacén tenía un formato incorrecto." #, python-format msgid "The cert file you specified %s does not exist" msgstr "El archivo de certificado que ha especificado %s no existe" msgid "The current status of this task" msgstr "El estado actual de esta tarea" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "El dispositivo que aloja el directorio de caché de imágenes " "%(image_cache_dir)s no soporta xattr. Es probable que tenga que editar fstab " "y añadir la poción user_xattr en la línea adecuada para que el dispositivo " "aloje el directorio de caché." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "El uri proporcionado no es válido. 
Por favor, especifique un URI válido de la " "siguiente lista de URI soportados %(supported)s" #, python-format msgid "The incoming image is too large: %s" msgstr "La imagen de entrada es demasiado grande: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "El archivo de claves que ha especificado %s no existe" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número permitido para ubicaciones de imagen. " "Intento: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número de miembros de imagen para esta " "imagen. Intentos: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número permitido para propiedades de imagen. " "Intento: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número permitido para etiquetas de imagen. " "Intento: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "Ya existe la ubicación %(location)s" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Los datos de ubicación contienen un ID no válido: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "No se borró la definición de metadatos %(record_type)s de nombre=" "%(record_name)s. Otros registros aún se refieren a ésta." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Ya existe el espacio de nombre para definición de metadatos=" "%(namespace_name)s." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "No se encontró el objeto para definición de metadatos de nombre=" "%(object_name)s en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "No se encontró la propiedad para definición de metadatos de nombre=" "%(property_name)s en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Ya existe la asociación de tipo de recurso del tipo de recurso=" "%(resource_type_name)s para el espacio de nombre=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "No se encontró la asociación de tipo de recurso del tipo de recurso para " "definición de metadatos=%(resource_type_name)s para el espacio de nombre=" "%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." 
msgstr "" "No se encontró el tipo de recurso para definición de metadatos de nombre=" "%(resource_type_name)s" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "No se encontró la etiqueta para definición de metadatos de nombre=%(name)s " "en el espacio de nombre=%(namespace_name)s." msgid "The parameters required by task, JSON blob" msgstr "Los parámetros requeridos por tarea, objeto JSON" msgid "The provided image is too large." msgstr "La imagen proporcionada es demasiado grande." msgid "The request returned 500 Internal Server Error." msgstr "La solicitud ha devuelto el mensaje 500 Error interno del servidor." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "La solicitud ha devuelto un error 503 Servicio no disponible. Esto sucede " "generalmente por una sobrecarga del servicio o una interrupción transitoria." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "La solicitud ha devuelto un 302 Múltiples opciones. Generalmente esto " "significa que no se ha incluido un indicador de versión en un URI de " "solicitud.\n" "\n" "El cuerpo de la respuesta devuelta:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La solicitud ha devuelto un error 413 Entidad de solicitud demasiado grande. " "Esto generalmente significa que se ha infringido el límite de índice o un " "umbral de cuota.\n" "\n" "El cuerpo de la respuesta:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La solicitud ha devuelto un estado inesperado: %(status)s.\n" "\n" "El cuerpo de la respuesta:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "Se ha desactivado la imagen solicitada. Se prohíbe la descarga de datos de " "imagen." msgid "The result of current task, JSON blob" msgstr "El resultado de la tarea, objeto JSON actual" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "El tamaño de los datos %(image_size)s excederá el límite. 
Quedan " "%(remaining)s bytes" #, python-format msgid "The specified member %s could not be found" msgstr "No se pudo encontrar el miembro %s especificado" #, python-format msgid "The specified metadata object %s could not be found" msgstr "No se pudo encontrar el objeto de metadatos %s especificado" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "No se pudo encontrar la etiqueta de metadatos %s especificada" #, python-format msgid "The specified namespace %s could not be found" msgstr "No se ha podido encontrar el espacio de nombre %s especificado" #, python-format msgid "The specified property %s could not be found" msgstr "No se pudo encontrar la propiedad %s especificada" #, python-format msgid "The specified resource type %s could not be found " msgstr "No se pudo encontrar el tipo de recurso %s especificado" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "El estado de la ubicación de la imagen eliminada solo se puede establecer " "como 'pending_delete' o 'deleted'." msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "El estado de la ubicación de imagen eliminada solo se puede establecer como " "'pending_delete' o 'deleted'." msgid "The status of this image member" msgstr "El estado de este miembro de la imagen" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "El miembro meta %(member_id)s ya está asociado con la imagen %(image_id)s." msgid "The type of task represented by this content" msgstr "El tipo de tarea representada por este contenido" msgid "The unique namespace text." msgstr "EL único texto de espacio de nombre." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "El nombre fácil de usar para el espacio de nombre. Utilizado por UI si está " "disponible." msgid "There was an error configuring the client." msgstr "Se ha producido un error al configurar el cliente. " msgid "There was an error connecting to a server" msgstr "Se ha producido un error al conectar a un servidor " msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Actualmente no se permite esta operación en las tareas Glance. Se eliminarán " "automáticamente después de alcanzar el tiempo con base en expires_at " "property." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "Tiempo de vida en horas para la tarea, así tenga éxito o fracase" msgid "Too few arguments." msgstr "Muy pocos argumentos." msgid "URL to access the image file kept in external store" msgstr "" "La URL para acceder al archivo de imagen se encuentra en un almacén externo" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "No se puede crear fichero pid %(pid)s. ¿Ejecutar como non-root?\n" "Retrocediendo a fichero temporal, puede detener el uso de servicio " "%(service)s:\n" " %(file)s %(server)s detener--fichero-pid %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "No se puede filtrar con el operador desconocido '%s'." msgid "Unable to filter on a range with a non-numeric value." 
msgstr "No se ha podido filtrar en un rango con un valor no numérico." msgid "Unable to filter on a unknown operator." msgstr "No se puede filtrar con un operador desconocido." msgid "Unable to filter using the specified operator." msgstr "No se ha podido filtrar utilizando el operador especificado." msgid "Unable to filter using the specified range." msgstr "No se ha podido filtrar mediante el rango especificado." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "No se ha podido encontrar '%s' en el cambio del esquema JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "No es posible encontrar `op` en cambio de JSON Schema. Debe ser uno de los " "siguientes: %(available)s. " msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "No se puede aumentar el límite de descripción de fichero ¿Desea ejecutar " "como non-root?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "No se ha podido cargar %(app_name)s desde el archivo de configuración " "%(conf_file)s.\n" "Se ha obtenido: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "No se ha podido cargar el esquema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "No se puede ubicar el fichero de configuración de pegado para %s." msgid "Unexpected body type. Expected list/dict." msgstr "Tipo de cuerpo inesperado. Se esperaba list/dict." #, python-format msgid "Unexpected response: %s" msgstr "Respuesta inesperada : %s " #, python-format msgid "Unknown auth strategy '%s'" msgstr "Estrategia de autenticación desconocida '%s' " #, python-format msgid "Unknown command: %s" msgstr "Comando desconocido %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Versión de borrador de esquema JSON no reconocida" msgid "Virtual size of image in bytes" msgstr "Tamaño virtual de la imagen en bytes" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Se esperó 15 segundos para que pid %(pid)s (%(file)s) muriera; desistiendo" msgid "You are not authenticated." msgstr "No está autenticado." msgid "You are not authorized to complete this action." msgstr "No está autorizado a completar esta acción." #, python-format msgid "You are not authorized to lookup image %s." msgstr "No tiene autorización para buscar la imagen %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "No tiene autorización para buscar los miembros de la imagen %s." msgid "You are not permitted to create image members for the image." msgstr "No tiene permiso para crear miembros de imagen para la imagen." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "No tiene permiso para crear imágenes propiedad de '%s'." 
msgid "You do not own this image" msgstr "No es propietario de esta imagen " msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Ha seleccionado utilizar SSL en la conexión y ha proporcionado un " "certificado, pero no ha proporcionado un parámetro key_file ni ha definido " "la variable de entorno GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Ha seleccionado utilizar SSL en la conexión y ha proporcionado una clave, " "pero no ha proporcionado un parámetro cert_file ni ha definido la variable " "de entorno GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() obtuvo un argumento de búsqueda inesperado '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "No se puede pasar de %(current)s a %(next)s en la actualización (se desea " "from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "las propiedades personalizadas (%(props)s) están en conflicto con las " "propiedades base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Los concentradores de 'sondeo' y los de 'selección' no están disponibles en " "esta plataforma" msgid "limit param must be an integer" msgstr "el parámetro de límite debe ser un entero" msgid "limit param must be positive" msgstr "el parámetro de límite debe ser positivo" msgid "md5 hash of image contents." msgstr "md5 hash de contenidos de imagen." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() obtuvo argumentos de búsqueda inesperados %s" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "No se puede iniciar %(serv)s. Se ha obtenido error: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id es demasiado largo, el tamaño máximo es %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/fr/0000775000175000017500000000000000000000000016155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000017742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/fr/LC_MESSAGES/glance.po0000664000175000017500000015163200000000000021543 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Arnaud Legendre , 2013 # Christophe kryskool , 2013 # EVEILLARD , 2013-2014 # Maxime COQUEREL , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:20+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "" "Une exception %(cls)s s'est produite dans le dernier appel d'une procédure " "distante : %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s introuvable dans la liste des membres de l'image %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) est en cours d'exécution..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s semble déjà en cours d'exécution : %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s de %(task_type)s ne sont pas configurés correctement. Impossible " "de charger le magasin de système de fichiers" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s de %(task_type)s ne sont pas configurés correctement. Rép de " "travail manquant : %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)s %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "Opération %(verb)s en cours sur %(serv)s avec %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Veuillez indiquer une paire hôte:port, dans laquelle hôte est une adresse " "IPv4, une adresse IPv6, un nom d'hôte ou un nom de domaine complet. Si vous " "utilisez une adresse IPv6, faites-la figurer entre crochets de façon à la " "séparer du port (par ex., \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s ne peut pas contenir de caractère Unicode de 4 octets." #, python-format msgid "%s is already stopped" msgstr "%s est déjà stoppé" #, python-format msgid "%s is stopped" msgstr "%s est arrêté" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Option --os_auth_url ou variable d'environnement OS_AUTH_URL requise lorsque " "la stratégie d'authentification keystone est activée\n" msgid "A body is not expected with this request." msgstr "Un corps n'est pas attendu avec cette demande." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Un objet de la définition de métadonnées avec le nom %(object_name)s existe " "déjà dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." 
msgstr "" "Une propriété de la définition de métadonnées avec le nom %(property_name)s " "existe déjà dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Un type de ressource de la définition de métadonnées avec le nom " "%(resource_type_name)s existe déjà." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Ensemble d'URL pour accéder au fichier image conservé dans le magasin externe" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Quantité d'espace disque (en Go) requise pour l'image d'initialisation." msgid "Amount of ram (in MB) required to boot image." msgstr "Quantité de mémoire RAM (en Mo) requise pour l'image d'initialisation." msgid "An identifier for the image" msgstr "Identificateur de l'image" msgid "An identifier for the image member (tenantId)" msgstr "Identificateur pour le membre de l'image (tenantId)" msgid "An identifier for the owner of this task" msgstr "Un identificateur pour le propriétaire de cette tâche" msgid "An identifier for the task" msgstr "Un identificateur pour la tâche" msgid "An image file url" msgstr "URL d'un fichier image" msgid "An image schema url" msgstr "URL d'un schéma d'image" msgid "An image self url" msgstr "URL d'une image self" msgid "An import task exception occurred" msgstr "Une exception liée à la tâche d'importation s'est produite" msgid "An object with the same identifier already exists." msgstr "Un objet avec le même identificateur existe déjà." msgid "An object with the same identifier is currently being operated on." msgstr "Un objet avec le même identificateur est déjà en cours d'utilisation." msgid "An object with the specified identifier was not found." msgstr "Un objet avec l'identificateur spécifié est introuvable." msgid "An unknown exception occurred" msgstr "Une exception inconnue s'est produite" msgid "An unknown task exception occurred" msgstr "Une exception de tâche inconnue s'est produite" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "L'attribut '%(property)s' est en lecture seule." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "L'attribut '%(property)s' est réservé." #, python-format msgid "Attribute '%s' is read-only." msgstr "L'attribut '%s' est en lecture seule." #, python-format msgid "Attribute '%s' is reserved." msgstr "Attribut '%s' est réservé." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "L'attribut container_format ne peut être remplacé que pour une image mise en " "file d'attente." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "L'attribut disk_format ne peut être remplacé que pour une image mise en file " "d'attente." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Service d'auth à l'URL %(url)s non trouvé." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Erreur d'authentification - le jeton a peut-être expiré lors du " "téléchargement de fichier. Suppression des données d'image pour %s." msgid "Authorization failed." msgstr "Echec de l'autorisation." msgid "Available categories:" msgstr "Catégories disponibles :" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Format de filtre de requête \"%s\" incorrect. 
Utilisez la notation de date " "et heure ISO 8601." #, python-format msgid "Bad header: %(header_name)s" msgstr "Erreur d’entête: %(header_name)s" msgid "Body expected in request." msgstr "Corps attendu dans la demande" msgid "Cannot be a negative value" msgstr "Ne peut pas être une valeur négative" msgid "Cannot be a negative value." msgstr "Ne peut pas être une valeur négative." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "Impossible de convertir l'image %(key)s '%(value)s' en entier." msgid "Cannot remove last location in the image." msgstr "Impossible de supprimer le dernier emplacement dans l'image." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "" "Les données pour l'image %(image_id)s ne peuvent pas être sauvegardées : " "erreur %(error)s" msgid "Cannot set locations to empty list." msgstr "Impossible de définir des emplacements avec une liste vide." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Echec de vérification du total de contrôle. Mise en cache de l'image '%s' " "annulée." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Erreur de connexion/demande erronée pour le service d'auth à l'URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL construite : %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "téléchargement d'image endommagée pour l'image %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Liaison impossible à %(host)s:%(port)s après une tentative de 30 secondes" msgid "Could not find OVF file in OVA archive file." msgstr "Fichier OVF introuvable dans le fichier archive OVA." #, python-format msgid "Could not find metadata object %s" msgstr "L'objet métadonnées %s est introuvable" #, python-format msgid "Could not find metadata tag %s" msgstr "Balise de métadonnées %s introuvable" #, python-format msgid "Could not find property %s" msgstr "Propriété %s introuvable" #, python-format msgid "Could not find task %s" msgstr "La tâche %s est introuvable" #, python-format msgid "Could not update image: %s" msgstr "Impossible de mettre à jour l'image : %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Actuellement, les packages OVA contenant plusieurs disques ne sont pas pris " "en charge." msgid "Data supplied was not valid." msgstr "Les données fournies n'étaient pas valides." 
msgid "Date and time of image member creation" msgstr "Date et heure de création du membre de l'image" msgid "Date and time of image registration" msgstr "Date et heure d'enregistrement de l'image" msgid "Date and time of last modification of image member" msgstr "Date et heure de dernière modification du membre de l'image" msgid "Date and time of namespace creation" msgstr "Date et heure de création de l'espace de nom" msgid "Date and time of object creation" msgstr "Date et heure de création de l'objet" msgid "Date and time of resource type association" msgstr "Date et heure d'association de type de ressource" msgid "Date and time of tag creation" msgstr "Date et heure de création de la balise" msgid "Date and time of the last image modification" msgstr "Date et heure de dernière modification de l'image" msgid "Date and time of the last namespace modification" msgstr "Date et heure de dernière modification de l'espace de nom" msgid "Date and time of the last object modification" msgstr "Date et heure de dernière modification de l'objet" msgid "Date and time of the last resource type association modification" msgstr "" "Date et heure de dernière modification d'association de type de ressource " msgid "Date and time of the last tag modification" msgstr "Date et heure de dernière modification de la balise " msgid "Datetime when this resource was created" msgstr "Date-heure à laquelle cette ressource a été créée" msgid "Datetime when this resource was updated" msgstr "Date-heure à laquelle cette ressource a été mise à jour" msgid "Datetime when this resource would be subject to removal" msgstr "Date-heure à laquelle cette ressource serait soumise à une suppression" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "Refus de la tentative de téléchargement d'une image qui dépasse le quota : %s" msgid "Descriptive name for the image" msgstr "Nom descriptif de l'image" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Impossible de configurer le pilote %(driver_name)s correctement. Cause : " "%(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Erreur lors du décodage de votre demande. L'URL ou le corps de la demande " "contiennent des caractères que Glance ne peut pas décoder" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" "Erreur lors de l'extraction des membres de l'image %(image_id)s : " "%(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Erreur de configuration du magasin. L'ajout d'images au magasin est " "désactivé." msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Membre attendu sous la forme : {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Statut attendu sous la forme : {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Échec pour trouver image %(image_id)s à supprimer." #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Echec pour trouver le type de ressource %(resourcetype)s a supprimer" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Impossible d'initialiser la base de données de caches d'image. 
Erreur " "obtenue : %s" #, python-format msgid "Failed to read %s from config" msgstr "Echec de la lecture de %s à partir de la config" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Echec de téléchargement des données image pour l'image %(image_id)s en " "raison d'une erreur HTTP : %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Echec de téléchargement des données image pour l'image %(image_id)s en " "raison d'une erreur interne : %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Le fichier %(path)s dispose d'un fichier de sauvegarde non valide : " "%(bfile)s. L'opération est abandonnée." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Les importations à partir de fichiers sont interdites. Utilisez une source " "externe de données image." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Interdiction de la demande, l'espace de nom %s de la définition de " "métadonnées n'est pas visible." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Interdiction de la demande, la tâche %s n'est pas visible" msgid "Format of the container" msgstr "Format du conteneur" msgid "Format of the disk" msgstr "Format du disque" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" n'est pas valide." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host et port \"%s\" ne sont pas valides." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Message d'information lisible par l'homme inclus uniquement si approprié " "(habituellement en cas d'incident)" msgid "If true, image will not be deletable." msgstr "Si true, l'image ne pourra pas être supprimée." msgid "If true, namespace will not be deletable." msgstr "Si true, l'espace de nom ne pourra pas être supprimé." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" "L'image %(id)s n'a pas pu être supprimée car elle est utilisée : %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Image %(image_id)s introuvable après le téléchargement. Elle a sans doute " "été supprimée au cours du téléchargement : %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "L'image %(image_id)s est protégée et ne peut pas être supprimée." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "L'image %s n'a pas été trouvée après le téléchargement. Elle a sans doute " "été supprimée pendant le téléchargement. Nettoyage des blocs téléchargés." #, python-format msgid "Image %s not found." msgstr "Image %s introuvable." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "l'image %s dépasse le quota de stockage" msgid "Image id is required." msgstr "Id image est requis." 
#, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "Le nombre maximal de membres est dépassé pour l'image %(id)s : %(e)s :" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "La transition du statut de l'image de %(cur_status)s vers %(new_status)s " "n'est pas autorisée" #, python-format msgid "Image storage media is full: %s" msgstr "Le support de stockage d'image est saturé : %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Le nombre maximal de balises est dépassé pour l'image %(id)s : %(e)s :" #, python-format msgid "Image upload problem: %s" msgstr "Problème d'envoi de l'image: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "L'image avec l'ID %(image_id)s indiqué est introuvable. " #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Stratégie d'autorisation incorrecte, valeur attendue \"%(expected)s\" mais " "valeur obtenue \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Requête incorrecte: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "L'entrée ne contient pas la zone '%(key)s'" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Droits insuffisants sur le support de stockage d'image : %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Pointeur JSON invalide pour cette ressource : '%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "" "Configuration non valide dans le fichier de configuration glance-swift." msgid "Invalid configuration in property protection file." msgstr "" "Configuration non valide dans le fichier de verrouillage de propriétés." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Type de contenu non valide %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valeur de filtre %s non valide. Les guillemets ne sont pas fermés." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valeur de filtre %s non valide. Il n'y a pas de virgule après la fermeture " "des guillemets." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Valeur de filtre %s non valide. Il n'y a pas de virgule avant l'ouverture " "des guillemets." msgid "Invalid location" msgstr "Emplacement non valide" #, python-format msgid "Invalid location: %s" msgstr "Emplacement non valide : %s" msgid "Invalid locations" msgstr "Emplacements non valides" #, python-format msgid "Invalid locations: %s" msgstr "Emplacements non valides : %s" msgid "Invalid marker format" msgstr "Format de marqueur non valide" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Opération non valide : `%(op)s`. Doit être l'une des suivantes : " "%(available)s." msgid "Invalid position for adding a location." msgstr "Position non valide pour l'ajout d'un emplacement." msgid "Invalid position for removing a location." msgstr "Position non valide pour la suppression d'un emplacement." msgid "Invalid service catalog json." msgstr "json de catalogue de service non valide." 
#, python-format msgid "Invalid sort direction: %s" msgstr "Sens de tri non valide : %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Clé de tri non valide : %(sort_key)s. Doit être l'une des valeurs " "suivantes : %(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Valeur de statut non valide : %s" #, python-format msgid "Invalid status: %s" msgstr "Statut non valide : %s" #, python-format msgid "Invalid type value: %s" msgstr "Type de valeur non valide: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Mise à jour non valide. Elle créerait une de définition de métadonnées en " "double avec le nom %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Mise à jour non valide. Elle créerait un objet de définition de métadonnées " "en double avec le nom %(name)s dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Mise à jour non valide. Elle créerait un objet de définition de métadonnées " "en double avec le nom %(name)s dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Mise à jour non valide. Elle créerait une propriété de définition de " "métadonnées avec le nom %(name)s dans l'espace de nom %(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" "Valeur non valide '%(value)s' pour le paramètre '%(param)s' : %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valeur non valide pour l'option %(option)s : %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valeur de visibilité non valide : %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "" "L'ajout des emplacements n'est pas autorisé si les emplacements sont " "invisibles." msgid "It's not allowed to remove locations if locations are invisible." msgstr "" "La suppression des emplacements n'est pas autorisée si les emplacements sont " "invisibles." msgid "It's not allowed to update locations if locations are invisible." msgstr "" "La mise à jour des emplacements n'est pas autorisée si les emplacements sont " "invisibles." msgid "List of strings related to the image" msgstr "Liste des chaînes associées à l'image" msgid "Malformed JSON in request body." msgstr "JSON incorrect dans le corps de demande." msgid "Maximal age is count of days since epoch." msgstr "L'ancienneté maximale est le nombre de jours depuis l'epoch." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Le nombre maximum de redirections (%(redirects)s) a été dépassé." 
#, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Le membre %(member_id)s est en double pour l'image %(image_id)s" msgid "Member can't be empty" msgstr "Membre ne peut pas être vide" msgid "Member to be added not specified" msgstr "Membre à ajouter non spécifié" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "L'espace de nom %(namespace)s de la définition de métadonnées est protégé et " "ne peut pas être supprimé." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" "L'espace de nom de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "L'objet %(object_name)s de la définition de métadonnées est protégé et ne " "peut pas être supprimé." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "L'objet de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "La propriété %(property_name)s de la définition de métadonnées est protégée " "et ne peut pas être supprimé." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "La propriété de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Le type de ressource %(resource_type_name)s de la définition de métadonnées " "est un type prédéfiniet ne peut pas être supprimé." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "L'association de type de ressource %(resource_type)s de la définition de " "métadonnées est protégée et ne peut pas être supprimée." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "La balise de définition de métadonnées %(tag_name)s est protégée et ne peut " "pas être supprimée." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "La balise de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "Missing required credential: %(required)s" msgstr "Données d'identification obligatoires manquantes : %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Plusieurs correspondances de service 'image' pour la région %(region)s. En " "général, cela signifie qu'une région est requise et que vous n'en avez pas " "indiquée." #, python-format msgid "No image found with ID %s" msgstr "aucune image trouvée avec l'identifiant %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Aucun emplacement trouvé avec l'ID %(loc)s dans l'image %(img)s" #, python-format msgid "Not allowed to create members for image %s." msgstr "Non autorisé à créer des membres pour l'image %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Non autorisé à désactiver l'image dans l'état '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Non autorisé à supprimer des membres de l'image %s." 
#, python-format msgid "Not allowed to delete tags for image %s." msgstr "Non autorisé à supprimer des balises de l'image %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Non autorisé à réactiver l'image dans l'état '%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "Non autorisé à mettre à jour les membres de l'image %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Non autorisé à mettre à jour des balises de l'image %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Non autorisé à télécharger des données image pour l'image %(image_id)s : " "%(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "Le nombre de rép de tri ne correspond pas au nombre de clés de tri" msgid "OVA extract is limited to admin" msgstr "L'extraction de fichiers OVA est limitée à admin" msgid "Old and new sorting syntax cannot be combined" msgstr "" "Les syntaxes de tri anciennes et nouvelles ne peuvent pas être combinées" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "L'opération \"%s\" requiert un membre nommé \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Les objets d'opération doivent contenir exactement un seul membre nommé \"add" "\", \"remove\" ou \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Les objets d'opération doivent contenir un seul membre nommé \"add\", " "\"remove\" ou \"replace\"." msgid "Operations must be JSON objects." msgstr "Les opérations doivent être des objets JSON." #, python-format msgid "Original locations is not empty: %s" msgstr "L'emplacement original %s n'est pas vide" msgid "Owner can't be updated by non admin." msgstr "Le propriétaire ne peut être mis à jour que par un administrateur." msgid "Owner of the image" msgstr "Propriétaire de l'image" msgid "Owner of the namespace." msgstr "Propriétaire de l'espace de nom." msgid "Param values can't contain 4 byte unicode." msgstr "" "Les valeurs de paramètre ne peuvent pas contenir de caractère Unicode de 4 " "octets." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Le pointeur `%s` contient \"~\" qui ne fait pas partie d'une séquence " "d'échappement reconnue." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Le pointeur `%s` contient des éléments \"/\" adjacent." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "le Pointeur '%s' ne contient pas de jeton valide." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Le pointeur `%s` ne commence pas par \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "le pointeur '%s' se termine avec un \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "Port \"%s\" n'est pas valide." #, python-format msgid "Process %d not running" msgstr "Le processus %d n'est pas en fonctionnement" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "" "Les propriétés %s doivent être définies avant de sauvegarder les données." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." 
msgstr "" "La propriété %(property_name)s ne commence pas par le préfixe d'association " "de type de ressource attendu : '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Propriété %s déjà présente." #, python-format msgid "Property %s does not exist." msgstr "La propriété %s n'existe pas." #, python-format msgid "Property %s may not be removed." msgstr "La propriété %s n'est peut-être pas supprimée." #, python-format msgid "Property %s must be set prior to saving data." msgstr "La propriété %s doit être définie avant de sauvegarder les données." msgid "Property names can't contain 4 byte unicode." msgstr "" "Les noms de propriété ne peuvent pas contenir de caractère Unicode de 4 " "octets." #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "L'objet fourni ne correspond pas au schéma '%(schema)s' : %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Le statut fourni de la tâche n'est pas pris en charge : %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Le type de tâche fourni n'est pas pris en charge : %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Fournit une description conviviale de l'espace de nom." msgid "Received invalid HTTP redirect." msgstr "Redirection HTTP non valide reçue." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirection vers %(uri)s pour autorisation." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Le registre n'a pas été configuré correctement sur le serveur d'API. Cause : " "%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Rechargement de %(serv)s non pris en charge" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Rechargement de %(serv)s (pid %(pid)s) avec le signal (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Suppression du fichier PID %s périmé" msgid "Request body must be a JSON array of operation objects." msgstr "Le corps de la demande doit être une matrice JSON d'objets Opération." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "La réponse de Keystone ne contient pas un noeud final Glance." msgid "Scope of image accessibility" msgstr "Périmètre d'accessibilité de l'image" msgid "Scope of namespace accessibility." msgstr "Périmètre de l'accessibilité de l'espace de nom." #, python-format msgid "Server %(serv)s is stopped" msgstr "Le serveur %(serv)s est arrêté" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Echec de la création de travailleur de serveur : %(reason)s." msgid "Signature verification failed" msgstr "La vérification de la signature a échoué" msgid "Size of image file in bytes" msgstr "Taille du fichier image en octets" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Certains types de ressource autorisent plusieurs paires clé-valeur par " "instance. Par exemple, Cinder autorise les métadonnées d'utilisateur et " "d'image sur les volumes. Seules les métadonnées de propriétés d'image sont " "évaluées par Nova (planification ou pilotes). 
Cette propriété autorise une " "cible d'espace de nom pour lever l'ambiguïté." msgid "Sort direction supplied was not valid." msgstr "Le sens de tri fourni n'était pas valide." msgid "Sort key supplied was not valid." msgstr "La clé de tri fournie n'était pas valide." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Spécifie le préfixe à utiliser pour le type de ressource donné. Toutes les " "propriétés de l'espace de nom doivent être précédées de ce préfixe " "lorsqu'elles s'appliquent au type de ressource spécifié. Vous devez inclure " "un séparateur de préfixe (par exemple, le signe deux-points :)." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "L'état doit être \"pending\", \"accepted\" ou \"rejected\"." msgid "Status not specified" msgstr "Statut non spécifié" msgid "Status of the image" msgstr "Statut de l'image" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "La transition de statut de %(cur_status)s vers %(new_status)s n'est pas " "autorisée" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Arrêt de %(serv)s (pid %(pid)s) avec le signal (%(sig)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "Valeurs prises en charge pour l'attribut d'image 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valeurs prises en charge pour l'attribut d'image 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Relance supprimée car %(serv)s était %(rsn)s." msgid "System SIGHUP signal received." msgstr "Signal SIGHUP du système reçu." #, python-format msgid "Task '%s' is required" msgstr "La tâche '%s' est obligatoire" msgid "Task does not exist" msgstr "La tâche n'existe pas" msgid "Task failed due to Internal Error" msgstr "Echec de la tâche en raison d'une erreur interne" msgid "Task was not configured properly" msgstr "La tâche n'a pas été configurée correctement" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "La tâche avec l'identificateur donné %(task_id)s est introuvable" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Le filtre \"changes-since\" n'est plus disponible sur la version 2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "" "Le fichier d'autorité de certification que vous avez spécifié %s n'existe pas" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "L'objet image %(image_id)s créé par la tâche %(task_id)s n'est plus dans un " "statut valide pour un traitement ultérieur." msgid "The Store URI was malformed." msgstr "L'URI de magasin était incorrect." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Le fichier de certificats que vous avez spécifié %s n'existe pas" msgid "The current status of this task" msgstr "Le statut actuel de cette tâche" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory."
msgstr "" "L'unité hébergeant le répertoire de cache d'image %(image_cache_dir)s ne " "prend pas en charge xattr. Vous devez probablement éditer votre fstab et " "ajouter l'option user_xattr sur la ligne appropriée de l'unité hébergeant le " "répertoire de cache." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "L'identificateur URI fourni n'est pas valide. Indiquez un identificateur URI " "valide sélectionné dans la liste des identificateurs URI pris en charge : " "%(supported)s" #, python-format msgid "The incoming image is too large: %s" msgstr "L'image entrante est trop grande : %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Le fichier de clés que vous avez spécifié %s n'existe pas" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre d'emplacements d'image autorisés. " "Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre de membres d'image autorisés pour " "cette image. Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre de propriétés d'image autorisées. " "Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre de balises d'image autorisées. " "Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "L'emplacement %(location)s existe déjà" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Les données d'emplacement possèdent un ID non valide : %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "La définition de métadonnées %(record_type)s avec le nom %(record_name)s n'a " "pas été supprimée. Elle est encore associée à d'autres enregistrements." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "L'espace de nom %(namespace_name)s de la définition de métadonnées existe " "déjà." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "L'objet %(object_name)s de la définition de métadonnées est introuvable dans " "l'espace de nom %(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "La propriété %(property_name)s de la définition de métadonnées est " "introuvable dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." 
msgstr "" "L'association de type de ressource de la définition de métadonnées entre " "letype de ressource %(resource_type_name)s et l'espace de nom " "%(namespace_name)s existe déjà." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "L'association de type de ressource de la définition de métadonnées entre " "letype de ressource %(resource_type_name)s et l'espace de nom " "%(namespace_name)s est introuvable." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Le type de ressource %(resource_type_name)s de la définition de métadonnées " "est introuvable." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "La balise de définition de métadonnées nommée %(name)s est introuvable dans " "l'espace de nom %(namespace_name)s." msgid "The parameters required by task, JSON blob" msgstr "Les paramètres requis par la tâche, blob JSON" msgid "The provided image is too large." msgstr "L'image fournie est trop volumineuse." msgid "The request returned 500 Internal Server Error." msgstr "La demande a renvoyé le message 500 Internal Server Error." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "La demande a renvoyé le message 503 Service Unavailable. Cela se produit " "généralement lors d'une surcharge de service ou de tout autre coupure " "transitoire." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "La demande a renvoyé un message 302 Multiple Choices. Cela signifie " "généralement que vous n'avez pas inclus d'indicateur de version dans l'URI " "de demande.\n" "\n" "Le corps de la réponse a renvoyé :\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La demande a renvoyé un message 413 Request Entity Too Large. Cela signifie " "généralement que le taux limite ou le seuil de quota a été dépassé.\n" "\n" "Corps de la réponse :\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La demande a renvoyé un statut inattendu : %(status)s.\n" "\n" "Corps de la réponse :\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "L'image demandée a été désactivée. Le téléchargement des données image est " "interdit." msgid "The result of current task, JSON blob" msgstr "Le résultat de la tâche en cours, blob JSON" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "La taille des données %(image_size)s dépassera la limite. %(remaining)s " "octets restants." 
#, python-format msgid "The specified member %s could not be found" msgstr "Le membre spécifié %s est introuvable" #, python-format msgid "The specified metadata object %s could not be found" msgstr "L'objet métadonnées spécifié %s est introuvable" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "La balise de métadonnées %s est introuvable" #, python-format msgid "The specified namespace %s could not be found" msgstr "L'espace de nom spécifié %s est introuvable" #, python-format msgid "The specified property %s could not be found" msgstr "La propriété spécifiée %s est introuvable" #, python-format msgid "The specified resource type %s could not be found " msgstr "Le type de ressource spécifié %s est introuvable " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "L'état de l'emplacement de l'image supprimée ne peut être réglé que sur " "'pending_delete' ou 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "L'état de l'emplacement de l'image supprimée ne peut être réglé que sur " "'pending_delete' ou 'deleted'." msgid "The status of this image member" msgstr "Statut de ce membre d'image" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "Le membre cible %(member_id)s est déjà associé à l'image %(image_id)s." msgid "The type of task represented by this content" msgstr "Le type de tâche représenté par ce contenu" msgid "The unique namespace text." msgstr "Texte unique de l'espace de nom." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Nom convivial de l'espace de nom. Utilisé par l'interface utilisateur si " "disponible." msgid "There was an error configuring the client." msgstr "Une erreur s'est produite lors de la configuration du client." msgid "There was an error connecting to a server" msgstr "Une erreur s'est produite lors de la connexion à un serveur." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Cette opération n'est actuellement pas autorisée sur les tâches Glance. " "Elles sont supprimées automatiquement après avoir atteint l'heure définie " "par la propriété expires_at." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "Durée de vie en heures d'une tâche suite à une réussite ou à un échec" msgid "Too few arguments." msgstr "Trop peu d'arguments." msgid "URL to access the image file kept in external store" msgstr "" "URL permettant d'accéder au fichier image conservé dans le magasin externe" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Impossible de créer le fichier PID %(pid)s. Exécution en tant que non " "root ?\n" "Rétablissement vers un fichier temporaire. Vous pouvez arrêter le service " "%(service)s avec :\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Filtrage impossible avec l'opérateur inconnu '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "Impossible de filtrer sur une plage avec une valeur non numérique." msgid "Unable to filter on a unknown operator." 
msgstr "Filtrage impossible avec un opérateur inconnu." msgid "Unable to filter using the specified operator." msgstr "Filtrage impossible à l'aide de l'opérateur spécifié." msgid "Unable to filter using the specified range." msgstr "Impossible de filtrer à l'aide de la plage spécifiée." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Impossible de trouver '%s' dans la modification du schéma JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Impossible de localiser `op` dans la modification du schéma JSON. Doit être " "l'une des valeurs suivantes : %(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Impossible d'augmenter la limite de descripteur de fichier. Exécution en " "tant que non root ?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Impossible de charger %(app_name)s depuis le fichier de configuration " "%(conf_file)s.\n" "Résultat : %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Impossible de charger le schéma : %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "" "Impossible de localiser le fichier de configuration du collage pour %s." msgid "Unexpected body type. Expected list/dict." msgstr "Type de corps inattendu. Type attendu : list/dict." #, python-format msgid "Unexpected response: %s" msgstr "Réponse inattendue : %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Stratégie d'autorisation inconnue '%s'" #, python-format msgid "Unknown command: %s" msgstr "commande %s inconnue" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Version brouillon du schéma JSON non reconnue" msgid "Virtual size of image in bytes" msgstr "Taille virtuelle de l'image en octets" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Attente de la fin du pid %(pid)s (%(file)s) pendant 15 secondes ; abandon en " "cours" msgid "You are not authenticated." msgstr "Vous n'êtes pas authentifié." msgid "You are not authorized to complete this action." msgstr "Vous n'êtes pas autorisé à effectuer cette action." #, python-format msgid "You are not authorized to lookup image %s." msgstr "Vous n'êtes pas autorisé à rechercher l'image %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Vous n'êtes pas autorisé à rechercher les membres de l'image %s." msgid "You are not permitted to create image members for the image." msgstr "Vous n'êtes pas autorisé à créer des membres image pour l'image." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Vous n'êtes pas autorisé à créer des images détenues par '%s'." 
msgid "You do not own this image" msgstr "Vous n'êtes pas propriétaire de cette image" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Vous avez choisi d'utiliser SSL pour la connexion et avez fourni un " "certificat, cependant, vous n'avez pas fourni de paramètre key_file ou " "n'avez pas défini la variable d'environnement GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Vous avez choisi d'utiliser SSL pour la connexion et avez fourni une clé, " "cependant, vous n'avez pas fourni de paramètre cert_file ou n'avez pas " "défini la variable d'environnement GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() a récupéré un argument de mot clé '%s' inattendu" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "impossible d'effectuer la transition depuis %(current)s vers %(next)s dans " "la mise à jour (voulu : from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "propriétés personnalisées (%(props)s) en conflit avec les propriétés de base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Les concentrateurs Eventlet 'poll' et 'selects' sont indisponibles sur cette " "plateforme" msgid "limit param must be an integer" msgstr "le paramètre limit doit être un entier" msgid "limit param must be positive" msgstr "le paramètre limit doit être positif" msgid "md5 hash of image contents." msgstr "Hachage md5 du contenu d'image." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() a récupéré des mots-clés %s inattendus" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "impossible de lancer %(serv)s. Erreur : %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id est trop long, sa taille maximale est de %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/it/0000775000175000017500000000000000000000000016162 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/it/LC_MESSAGES/0000775000175000017500000000000000000000000017747 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/it/LC_MESSAGES/glance.po0000664000175000017500000015001100000000000021536 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andreas Jaeger , 2016. #zanata # KATO Tomoyuki , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-03 01:43+0000\n" "Last-Translator: KATO Tomoyuki \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Italian\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Eccezione %(cls)s generata nell'ultima chiamata rpc: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s non trovato nell'elenco di membri dell'immagine %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) in esecuzione..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s sembra essere già in esecuzione: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s di %(task_type)s non configurato correttamente. Impossibile " "caricare l'archivio filesystem" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s di %(task_type)s non configurato correttamente. Directory di " "lavoro mancante: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)s %(serv)s con %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Specificare una coppia host:port in cui host è un indirizzo IPv4, un " "indirizzo IPv6 nome host o FQDN. Se si utilizza un indirizzo IPv6 " "racchiuderlo in parentesi separatamente dalla porta (ad esempio, \"[fe80::a:" "b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s non può contenere 4 byte di caratteri unicode." #, python-format msgid "%s is already stopped" msgstr "%s è già stato arrestato" #, python-format msgid "%s is stopped" msgstr "%s è stato arrestato" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "l'opzione --os_auth_url o la variabile d'ambiente OS_AUTH_URL sono " "obbligatori quando è abilitato il modo di autenticazione keystone\n" msgid "A body is not expected with this request." msgstr "Un corpo non è previsto con questa richiesta." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Un oggetto della definizione di metadati con nome=%(object_name)s già " "esiste nello nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Una proprietà della definizione di metadati con nome=%(property_name)s già " "esiste nello spazio dei nomi=%(namespace_name)s." 
#, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Un tipo-risorsa della definizione di metadati con nome=" "%(resource_type_name)s già esiste." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Un insieme di URL per accedere al file di immagini conservato nell'archivio " "esterno" msgid "Amount of disk space (in GB) required to boot image." msgstr "Quantità di spazio su disco (in GB) richiesto per l'immagine di avvio." msgid "Amount of ram (in MB) required to boot image." msgstr "Quantità di ram (in MB) richiesta per l'immagine di avvio." msgid "An identifier for the image" msgstr "Un identificativo per l'immagine" msgid "An identifier for the image member (tenantId)" msgstr "Un identificativo per il membro dell'immagine (tenantId)" msgid "An identifier for the owner of this task" msgstr "Un identificativo del proprietario di questa attività" msgid "An identifier for the task" msgstr "Un identificativo per l'attività" msgid "An image file url" msgstr "Un URL al file di immagini" msgid "An image schema url" msgstr "Un URL allo schema di immagini" msgid "An image self url" msgstr "Un URL personale all'immagine" msgid "An import task exception occurred" msgstr "Si è verificata un'eccezione attività di importazione" msgid "An object with the same identifier already exists." msgstr "Già esiste un oggetto con lo stesso identificativo." msgid "An object with the same identifier is currently being operated on." msgstr "Un oggetto con lo stesso identificativo è attualmente in uso." msgid "An object with the specified identifier was not found." msgstr "Impossibile trovare un oggetto con l'identificativo specificato." msgid "An unknown exception occurred" msgstr "Si è verificata un'eccezione sconosciuta" msgid "An unknown task exception occurred" msgstr "Si è verificata un'eccezione attività sconosciuta" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Attributo '%(property)s' è di sola lettura." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "L'attributo '%(property)s' è riservato." #, python-format msgid "Attribute '%s' is read-only." msgstr "Attributo '%s' è di sola lettura." #, python-format msgid "Attribute '%s' is reserved." msgstr "L'attributo '%s' è riservato." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "L'attributo container_format può essere sostituito solo per un'immagine " "nella coda." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "L'attributo disk_format può essere sostituito solo per un'immagine nella " "coda." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Servizio di autenticazione all'URL %(url)s non trovato." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Errore di autenticazione - il token potrebbe essere scaduto durante il " "caricamento del file. Eliminazione dei dati dell'immagine per %s." msgid "Authorization failed." msgstr "Autorizzazione non riuscita." msgid "Available categories:" msgstr "Categorie disponibili:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Formato filtro di query \"%s\" errato. Utilizzare la notazione ISO 8601 " "DateTime." #, python-format msgid "Bad header: %(header_name)s" msgstr "Intestazione non valida: %(header_name)s" msgid "Body expected in request." 
msgstr "Corpo previsto nella richiesta." msgid "Cannot be a negative value" msgstr "Non può essere un valore negativo" msgid "Cannot be a negative value." msgstr "Non può essere un valore negativo." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "" "Impossibile convertire %(key)s dell'immagine '%(value)s' in un numero intero." msgid "Cannot remove last location in the image." msgstr "Impossibile rimuovere l'ultima ubicazione nell'immagine." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Impossibile salvare i dati per l'immagine %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Impossibile impostare le ubicazione nell'elenco vuoto." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Verifica checksum non riuscita. È stata interrotta la memorizzazione nella " "cache dell'immagine '%s'." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Connetti richiesta/non corretta o in errore per il servizio di " "autenticazione all'URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL costruita: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "" "Esecuzione del download immagine danneggiato per l'immagine %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Impossibile collegarsi a %(host)s:%(port)s dopo aver tentato per 30 secondi" msgid "Could not find OVF file in OVA archive file." msgstr "Impossibile trovare il file OVD nel file di archivio OVA." #, python-format msgid "Could not find metadata object %s" msgstr "Impossibile trovare l'oggetto di metadati %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Impossibile trovare il tag di metadati %s" #, python-format msgid "Could not find property %s" msgstr "Impossibile trovare la proprietà %s" #, python-format msgid "Could not find task %s" msgstr "Impossibile trovare l'attività %s" #, python-format msgid "Could not update image: %s" msgstr "Impossibile aggiornare l'immagine: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Attualmente, i pacchetti OVA che contengono più dischi non sono supportati." msgid "Data supplied was not valid." msgstr "I dati forniti non erano validi." 
msgid "Date and time of image member creation" msgstr "Data e ora di creazione del membro dell'immagine" msgid "Date and time of image registration" msgstr "Data e ora della registrazione dell'immagine" msgid "Date and time of last modification of image member" msgstr "Data e ora dell'ultima modifica del membro dell'immagine" msgid "Date and time of namespace creation" msgstr "Data ed ora della creazione dello spazio dei nomi" msgid "Date and time of object creation" msgstr "Data ed ora della creazione dell'oggetto" msgid "Date and time of resource type association" msgstr "Data ed ora dell'associazione del tipo di risorsa" msgid "Date and time of tag creation" msgstr "Data ed ora della creazione del tag" msgid "Date and time of the last image modification" msgstr "Data e ora dell'ultima modifica dell'immagine" msgid "Date and time of the last namespace modification" msgstr "Data ed ora dell'ultima modifica allo spazio dei nomi" msgid "Date and time of the last object modification" msgstr "Data ed ora dell'ultima modifica all'oggetto" msgid "Date and time of the last resource type association modification" msgstr "Data ed ora dell'ultima modifica all'associazione del tipo di risorsa" msgid "Date and time of the last tag modification" msgstr "Data ed ora dell'ultima modifica al tag" msgid "Datetime when this resource was created" msgstr "Data e ora in cui questa risorsa è stata creata" msgid "Datetime when this resource was updated" msgstr "Data e ora in cui questa risorsa è stata aggiornata" msgid "Datetime when this resource would be subject to removal" msgstr "Data e ora in cui questa risorsa verrà rimossa" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "Rifiutato il tentativo di caricare l'immagine perché supera la quota: %s" msgid "Descriptive name for the image" msgstr "Nome descrittivo per l'immagine" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Impossibile configurare il driver %(driver_name)s correttamente. Motivo: " "%(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Errore di decodifica della richiesta. L'URL o il corpo della richiesta " "contengono caratteri che non possono essere decodificati da Glance" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" "Errore durante il recupero dei membri immagine %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Errore nella configurazione dell'archivio. L'aggiunta di immagini a questo " "archivio non è consentita." msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Previsto un membro nel formato: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Previsto uno stato nel formato: {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Impossibile trovare l'immagine %(image_id)s da eliminare" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Impossibile trovare il tipo di risorsa %(resourcetype)s da eliminare" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Impossibile inizializzare il database cache immagini. 
Errore ricevuto: %s" #, python-format msgid "Failed to read %s from config" msgstr "Impossibile leggere %s dalla configurazione" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Impossibile caricare i dati dell'immagine %(image_id)s a causa di un errore " "HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Impossibile caricare i dati dell'immagine %(image_id)s a causa di un errore " "interno: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Il file %(path)s ha un file di backup %(bfile)s non valido, operazione " "interrotta." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Le importazioni basata su file non sono consentite. Utilizzare un'origine " "dati dell'immagine non locale." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Richiesta vietata, lo spazio dei nomi della definizione di metadati =%s non " "è visibile." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Richiesta vietata, l'attività %s non è visibile" msgid "Format of the container" msgstr "Formato del contenitore" msgid "Format of the disk" msgstr "Formato del disco" #, python-format msgid "Host \"%s\" is not valid." msgstr "L'host \"%s\" non è valido." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host o porta \"%s\" non è valido." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "I messaggi informativi leggibili dall'utente sono inclusi solo se necessario " "(di solito in caso di errore)" msgid "If true, image will not be deletable." msgstr "Se true, l'immagine non sarà eliminabile." msgid "If true, namespace will not be deletable." msgstr "Se impostato su true, lo spazio dei nomi non sarà eliminabile." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "L'immagine %(id)s non può essere eliminata perché è in uso: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Impossibile trovare l'immagine %(image_id)s dopo il caricamento. L'immagine " "potrebbe essere stata eliminata durante il caricamento: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "L'immagine %(image_id)s è protetta e non può essere eliminata." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Impossibile trovare l'immagine %s dopo il caricamento. L'immagine potrebbe " "essere stata eliminata durante il caricamento. Eliminazione delle porzioni " "caricate." #, python-format msgid "Image %s not found." msgstr "Immagine %s non trovata." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "L'immagine supera la quota di memoria: %s" msgid "Image id is required." msgstr "ID immagine obbligatorio." 
#, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" "Superato il limite del membro dell'immagine per l'immagine %(id)s: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Il passaggio di stato dell'immagine da %(cur_status)s a %(new_status)s non è " "consentito" #, python-format msgid "Image storage media is full: %s" msgstr "Il supporto di memorizzazione dell'immagine è pieno: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Superato il limite di tag dell'immagine per l'immagine %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problemi nel caricamento dell'immagine: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "L'immagine con l'id fornito %(image_id)s non è stata trovata" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Strategia di autenticazione errata, previsto \"%(expected)s\" ma ricevuto " "\"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Richiesta non corretta: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "L'input non contiene il campo '%(key)s'" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "" "Autorizzazioni insufficienti sul supporto di memorizzazione immagini: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Puntatore JSON non valido per questa risorsa: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "Configurazione nel file di configurazione glance-swift non valida." msgid "Invalid configuration in property protection file." msgstr "Configurazione non valida nel file di protezione della proprietà." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Tipo contenuto non valido %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valore filtro non valido %s. Le virgolette non sono chiuse." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valore filtro non valido %s. Non è presente una virgola prima delle " "virgolette di chiusura." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Valore filtro non valido %s. Non è presente una virgola prima delle " "virgolette di apertura." msgid "Invalid location" msgstr "Ubicazione non valida" #, python-format msgid "Invalid location: %s" msgstr "Ubicazione non valida: %s" msgid "Invalid locations" msgstr "Ubicazioni non valide" #, python-format msgid "Invalid locations: %s" msgstr "Ubicazioni non valide: %s" msgid "Invalid marker format" msgstr "Formato indicatore non valido" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operazione non valida: `%(op)s`. Deve essere uno dei seguenti: %(available)s." msgid "Invalid position for adding a location." msgstr "Posizione non valida per l'aggiunta di una ubicazione." msgid "Invalid position for removing a location." msgstr "Posizione non valida per la rimozione di una ubicazione." msgid "Invalid service catalog json." msgstr "json del catalogo del servizio non è valido." 
#, python-format msgid "Invalid sort direction: %s" msgstr "Direzione ordinamento non valida: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Chiave di ordinamento non valida: %(sort_key)s. Deve essere una delle " "seguenti: %(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Valore di stato non valido: %s" #, python-format msgid "Invalid status: %s" msgstr "Stato non valido: %s" #, python-format msgid "Invalid type value: %s" msgstr "Valore di tipo non valido: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Aggiornamento non valido. Potrebbe generare uno spazio dei nomi della " "definizione di metadati duplicato con lo stesso nome di %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Aggiornamento non valido. Potrebbe generare un oggetto della definizione di " "metadati duplicato con lo stesso nome=%(name)s nello spazio dei nomi" "%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Aggiornamento non valido. Potrebbe generare un oggetto della definizione di " "metadati duplicato con lo stesso nome=%(name)s nello spazio dei nomi" "%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Aggiornamento non valido. Potrebbe generare uno spazio dei nomi della " "definizione di metadati duplicato con lo stesso nome=%(name)s nello spazio " "dei nomi=%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" "Valore '%(value)s' non valido per il parametro '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valore non valido per l'opzione %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valore visibilità non valido: %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "" "Non è consentito aggiungere ubicazione se le ubicazioni sono invisibili." msgid "It's not allowed to remove locations if locations are invisible." msgstr "" "Non è consentito rimuovere ubicazioni se le ubicazioni sono invisibili." msgid "It's not allowed to update locations if locations are invisible." msgstr "Non è consentito caricare ubicazioni se le ubicazioni sono invisibili." msgid "List of strings related to the image" msgstr "Elenco di stringhe relative all'immagine" msgid "Malformed JSON in request body." msgstr "JSON non corretto nel corpo della richiesta." msgid "Maximal age is count of days since epoch." msgstr "L'età massima è il numero di giorni dal periodo." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Il numero massimo di rendirizzamenti (%(redirects)s) è stato superato." 
#, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Il membro %(member_id)s è il duplicato dell'immagine %(image_id)s" msgid "Member can't be empty" msgstr "Il membro non può essere vuoto" msgid "Member to be added not specified" msgstr "Membro da aggiungere non specificato" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Lo spazio dei nomi della definizione di metadati %(namespace)s è protetto e " "non è possibile eliminarlo." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" "Lo spazio dei nomi della definizione dei metadati per l'id=%s non è stato " "trovato" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "L'oggetto di definizione di metadati %(object_name)s è protetto e non è " "possibile eliminarlo." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "" "L'oggetto della definizione dei metadati per l'id=%s non è stato trovato" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "La proprietà della definizione di metadati %(property_name)s è protetta e " "non è possibile eliminarlo." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "" "La proprietà della definizione dei metadati per l'id=%s non è stata trovata" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Il tipo-risorsa della definizione di metadati %(resource_type_name)s è un " "tipo inserito dalsistema e non è possibile eliminarlo." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "L'associazione-tipo-risorsa della definizione di metadati %(resource_type)s " "è protetta e non può essere eliminata." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Il tag di definizione dei metadati %(tag_name)s è protetto e non può essere " "eliminato." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Il tag di definizione dei metadati per l'id=%s non è stato trovato" #, python-format msgid "Missing required credential: %(required)s" msgstr "Credenziale richiesta mancante: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Il servizio 'immagine' multipla corrisponde nella regione %(region)s. Questo " "in genere significa che una regione è obbligatoria e non ne è stata fornita " "una." #, python-format msgid "No image found with ID %s" msgstr "Nessuna immagine trovata con ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "" "Non è stata trovata nessuna ubicazione con ID %(loc)s dall'immagine %(img)s" #, python-format msgid "Not allowed to create members for image %s." msgstr "Non è consentito creare membri per l'immagine %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Disattivazione dell'immagine in stato '%s' non consentita" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Non è consentito eliminare i membri dell'immagine %s." 
#, python-format msgid "Not allowed to delete tags for image %s." msgstr "Non è consentito eliminare i tag dell'immagine %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Riattivazione dell'immagine in stato '%s' non consentita" #, python-format msgid "Not allowed to update members for image %s." msgstr "Non è consentito aggiornare i membri dell'immagine %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Non è consentito aggiornare i tag dell'immagine %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Non è consentito caricare i dati dell'immagine per l'immagine %(image_id)s: " "%(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "Il numero di directory di ordinamento non corrisponde al numero di chiavi di " "ordinamento" msgid "OVA extract is limited to admin" msgstr "L'estrazione OVA è limitata all'amministratore" msgid "Old and new sorting syntax cannot be combined" msgstr "Impossibile combinare la nuova e la precedente sintassi di ordinamento" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "L'operazione \"%s\" richiede un membro denominato \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Gli oggetti dell'operazione devono contenere esattamente un membro " "denominato \"add\", \"remove\" o \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Gli oggetti dell'operazione devono contenere solo un membro denominato \"add" "\", \" remove \" o \"replace\"." msgid "Operations must be JSON objects." msgstr "Le operazioni devono essere oggetti JSON." #, python-format msgid "Original locations is not empty: %s" msgstr "Le ubicazioni originali non sono vuote: %s" msgid "Owner can't be updated by non admin." msgstr "Il proprietario non può essere aggiornato da un non admin." msgid "Owner of the image" msgstr "Proprietario dell'immagine" msgid "Owner of the namespace." msgstr "Proprietario dello spazio dei nomi." msgid "Param values can't contain 4 byte unicode." msgstr "I valori dei parametri non possono contenere 4 byte unicode." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Il puntatore `%s` contiene \"~\" che non fa parte di una sequenza escape " "riconosciuta." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Il puntatore `%s` contiene l'adiacente \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Il puntatore `%s` non contiene token valido." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Il puntatore `%s` non inizia con \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Il puntatore `%s` finisce con \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "La porta \"%s\" non è valida." #, python-format msgid "Process %d not running" msgstr "Il processo %d non è in esecuzione" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "Le proprietà %s devono essere impostate prima di salvare i dati." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." 
msgstr "" "La proprietà %(property_name)s non inizia con il prefisso di associazione " "del tipo di risorsa previsto '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "La proprietà %s è già presente." #, python-format msgid "Property %s does not exist." msgstr "La proprietà %s non esiste." #, python-format msgid "Property %s may not be removed." msgstr "La proprietà %s non può essere rimossa." #, python-format msgid "Property %s must be set prior to saving data." msgstr "La proprietà %s deve essere impostata prima di salvare i dati." msgid "Property names can't contain 4 byte unicode." msgstr "I nomi delle proprietà non possono contenere 4 byte unicode." #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "L'oggetto fornito non corrisponde allo schema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Lo stato dell'attività fornito non è supportato: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Il tipo dell'attività fornito non è supportato: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Fornisce una semplice descrizione utente dello spazio dei nomi." msgid "Received invalid HTTP redirect." msgstr "Ricevuto un reindirizzamento HTTP non valido." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Reindirizzamento a %(uri)s per l'autorizzazione." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Il registro non è stato configurato correttamente sul server API. Motivo: " "%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Ricaricamento di %(serv)s non supportato" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Ricaricamento %(serv)s (pid %(pid)s) con segnale(%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Rimozione del file pid %s obsoleto in corso" msgid "Request body must be a JSON array of operation objects." msgstr "" "Il corpo della richiesta deve essere un array JSON degli oggetti " "dell'operazione." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "La risposta dal Keystone non contiene un endpoint Glance." msgid "Scope of image accessibility" msgstr "Ambito di accessibilità dell'immagine" msgid "Scope of namespace accessibility." msgstr "Ambito di accessibilità dello spazio dei nomi." #, python-format msgid "Server %(serv)s is stopped" msgstr "Il server %(serv)s è stato arrestato" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Creazione dell'operatore server non riuscita: %(reason)s." msgid "Signature verification failed" msgstr "Verifica firma non riuscita" msgid "Size of image file in bytes" msgstr "Dimensione del file di immagine in byte" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Alcuni tipi di risorsa consentono più di una coppia chiave / valore per " "istanza. Ad esempio, Cinder consente metadati immagine ed utente sui " "volumi. Solo i metadati delle proprietà dell'immagine vengono valutati da " "Nova (pianificazione o driver). 
Questa proprietà consente a una destinazione " "dello spazio dei nomi di eliminare l'ambiguità." msgid "Sort direction supplied was not valid." msgstr "La direzione di ordinamento fornita non è valida." msgid "Sort key supplied was not valid." msgstr "La chiave di ordinamento fornita non è valida." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Specifica il prefisso da utilizzare per il tipo di risorsa fornito. " "Qualsiasi proprietà nello spazio dei nomi deve essere preceduta da questo " "prefisso quando viene applicata ad un tipo di risorsa specificato. Deve " "includere un separatore di prefisso (ad esempio due punti :)." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Lo stato deve essere \"pending\", \"accepted\" o \"rejected\"." msgid "Status not specified" msgstr "Stato non specificato" msgid "Status of the image" msgstr "Stato dell'immagine" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Il passaggio di stato da %(cur_status)s a %(new_status)s non è consentito" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Arresto di %(serv)s in corso (pid %(pid)s) con segnale(%(sig)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "Valori supportati per l'attributo di immagine 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valori supportati per l'attributo di immagine 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Respawn soppresso poiché %(serv)s era %(rsn)s." msgid "System SIGHUP signal received." msgstr "Ricevuto segnale SIGHUP di sistema." #, python-format msgid "Task '%s' is required" msgstr "Attività '%s' obbligatoria" msgid "Task does not exist" msgstr "L'attività non esiste" msgid "Task failed due to Internal Error" msgstr "Attività non riuscita a causa di un errore interno" msgid "Task was not configured properly" msgstr "L'attività non è stata configurata correttamente" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "L'attività con l'id fornito %(task_id)s non è stata trovata" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Il filtro \"changes-since\" non è più disponibile su v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "Il file CA specificato %s non esiste" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "L'oggetto immagine %(image_id)s, in fase di creazione da questa attività " "%(task_id)s, non si trova più in uno stato che ne consenta ulteriori " "elaborazioni." msgid "The Store URI was malformed." msgstr "L'URI della memoria non era corretto." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Il file certificato specificato %s non esiste" msgid "The current status of this task" msgstr "Lo stato corrente di questa attività" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." 
msgstr "" "L'unità in cui si trova la directory cache dell'immagine %(image_cache_dir)s " "non supporta xattr. Probabilmente è necessario modificare fstab e aggiungere " "l'opzione user_xattr nella riga appropriata per l'unità che ospita la " "directory cache." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "L'URI fornito non è valido. Specificare un URI valido dal seguente elenco di " "uri supportati %(supported)s" #, python-format msgid "The incoming image is too large: %s" msgstr "L'immagine in entrata è troppo grande: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Il file chiave specificato %snon esiste" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di ubicazioni immagine consentito è stato superato. Tentato: " "%(attempted)s, Massimo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di membri dell'immagine consentito è stato superato in questa " "immagine. Tentato: %(attempted)s, Massimo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di proprietà immagine consentito è stato superato. Tentato: " "%(attempted)s, Massimo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di tag immagine consentito è stato superato. Tentato: " "%(attempted)s, Massimo: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "L'ubicazione %(location)s esiste già" #, python-format msgid "The location data has an invalid ID: %d" msgstr "I dati dell'ubicazione hanno un ID non valido: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "La definizione di metadati %(record_type)s con nome=%(record_name)s non è " "eliminata. Altri record ancora fanno riferimento a tale definizione." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Lo spazio dei nomi della definizione di metadati =%(namespace_name)s già " "esiste." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "L'oggetto della definizione di metadati con nome=%(object_name)s non è stato " "trovato nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "La proprietà della definizione di metadati con nome=%(property_name)s non è " "stata trovata nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "L'associazione tipo-risorsa della definizione di metadati del tipo-risorsa=" "%(resource_type_name)s per lo spazio dei nomi=%(namespace_name)s già esiste." 
#, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "L'associazione tipo-risorsa della definizione di metadati del tipo-risorsa=" "%(resource_type_name)s per lo spazio dei nomi=%(namespace_name)s, non è " "stata trovata." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Il tipo-risorsa della definizione di metadati con nome=" "%(resource_type_name)s, non è stato trovato." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Il tag di definizione dei metadati con nome=%(name)s non è stato trovato " "nello spazio dei nomi=%(namespace_name)s." msgid "The parameters required by task, JSON blob" msgstr "I parametri richiesti dall'attività, blob JSON" msgid "The provided image is too large." msgstr "L'immagine fornita è troppo grande." msgid "The request returned 500 Internal Server Error." msgstr "La richiesta ha restituito 500 Errore interno del server." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "La richiesta ha restituito 503 Servizio non disponibile 503. Ciò " "generalmente si verifica nel sovraccarico del servizio o altro tipo di " "interruzione temporanea." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "La richiesta ha restituito 302 scelte multiple. Questo generalmente indica " "che non è stato incluso un indicatore di versione in un URI della " "richiesta.\n" "\n" "Restituito il corpo della risposta:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La richiesta ha restituito 413 Entità della richiesta troppo grande. Questo " "generalmente significa che il limite della velocità o la soglia della quota " "sono stati violati.\n" "\n" "Il corpo della risposta \n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La richiesta ha restituito uno stato imprevisto: %(status)s.\n" "\n" "Il corpo della risposta \n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "L'immagine richiesta è stata disattivata. Il download dei dati immagine non " "è consentito." msgid "The result of current task, JSON blob" msgstr "Il risultato dell'attività corrente, blob JSON" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "La dimensione dei dati %(image_size)s supererà il limite. %(remaining)s byte " "rimanenti." 
#, python-format msgid "The specified member %s could not be found" msgstr "Impossibile trovare il membro specificato %s" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Impossibile trovare l'oggetto di metadati %s specificato" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Impossibile trovare il tag di metadati %s specificato" #, python-format msgid "The specified namespace %s could not be found" msgstr "Impossibile trovare lo spazio dei nomi %s specificato" #, python-format msgid "The specified property %s could not be found" msgstr "Impossibile trovare la proprietà %s specificata" #, python-format msgid "The specified resource type %s could not be found " msgstr "Impossibile trovare il tipo di risorsa %s specificato " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Lo stato dell'ubicazione immagine eliminata può essere impostata solo su " "'pending_delete' o 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Lo stato dell'ubicazione immagine eliminata può essere impostata solo su " "'pending_delete' o 'deleted'." msgid "The status of this image member" msgstr "Lo stato di questo membro dell'immagine" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "Il membro di destinazione %(member_id)s è già associato all'immagine " "%(image_id)s." msgid "The type of task represented by this content" msgstr "Il tipo di attività rappresentata da questo contenuto" msgid "The unique namespace text." msgstr "Il testo dello spazio dei nomi univoco." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Il nome utente semplice per lo spazio dei nomi. Utilizzato dalla UI se " "disponibile." msgid "There was an error configuring the client." msgstr "Si è verificato un errore durante la configurazione del client." msgid "There was an error connecting to a server" msgstr "Si è verificato un errore durante la connessione al server" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Questa operazione non è attualmente consentita nelle attività Glance. " "Vengono automaticamente eliminate al raggiungimento dell'ora in base alla " "proprietà expires_at." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Periodo di tempo, in ore, per cui l'attività prosegue dopo l'esito positivo " "o meno" msgid "Too few arguments." msgstr "Troppo pochi argomenti." msgid "URL to access the image file kept in external store" msgstr "URL per accedere al file di immagini tenuto nell'archivio esterno" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Impossibile creare il file pid %(pid)s. Eseguire come non-root?\n" "Ritorno a un file temporaneo; è possibile arrestare il servizio %(service)s " "utilizzando:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Impossibile filtrare mediante un operatore sconosciuto '%s'." msgid "Unable to filter on a range with a non-numeric value." 
msgstr "" "Impossibile filtrare in base a un intervallo con un valore non numerico." msgid "Unable to filter on a unknown operator." msgstr "Impossibile filtrare su un operatore sconosciuto." msgid "Unable to filter using the specified operator." msgstr "Impossibile filtrare utilizzando l'operatore specificato." msgid "Unable to filter using the specified range." msgstr "Impossibile filtrare utilizzando l'intervallo specificato." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Impossibile trovare '%s' nella modifica dello schema JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Impossibile trovare `op` in modifica schema JSON. Deve essere uno dei " "seguenti: %(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Impossibile aumentare il limite del descrittore di file. Eseguire come non-" "root?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Impossibile caricare %(app_name)s dal file di configurazione %(conf_file)s.\n" "Ricevuto: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Impossibile caricare lo schema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Impossibile individuare il file di configurazione paste per %s." msgid "Unexpected body type. Expected list/dict." msgstr "Tipo di corpo imprevisto. Elenco/dizionario previsto." #, python-format msgid "Unexpected response: %s" msgstr "Risposta imprevista: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Strategia di autenticazione sconosciuta '%s'" #, python-format msgid "Unknown command: %s" msgstr "Comando sconosciuto: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Versione della bozza dello schema JSON non riconosciuta" msgid "Virtual size of image in bytes" msgstr "Dimensione virtuale dell'immagine in byte" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "Entro 15 secondi il pid %(pid)s (%(file)s) verrà interrotto; terminato" msgid "You are not authenticated." msgstr "L'utente non è autenticato." msgid "You are not authorized to complete this action." msgstr "Non si è autorizzati a completare questa azione." #, python-format msgid "You are not authorized to lookup image %s." msgstr "Non si è autorizzati a ricercare l'immagine %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Non si è autorizzati a ricercare i membri dell'immagine %s." msgid "You are not permitted to create image members for the image." msgstr "Non si è autorizzati a creare membri dell'immagine per l'immagine." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Non si è autorizzati a creare immagini di proprietà di '%s'." 
msgid "You do not own this image" msgstr "Non si possiede tale immagine" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Si è scelto di utilizzare nella connessione SSL ed è stato fornito un " "certificato, tuttavia non è stato fornito un parametro key_file o la " "variabile di ambiente GLANCE_CLIENT_KEY_FILE non è stata impostata" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Si è scelto di utilizzare SSL nella connessione e si è fornita una chiave, " "tuttavia non è stato fornito un parametro cert_file parameter o la variabile " "di ambiente GLANCE_CLIENT_CERT_FILE non è stata impostata" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() ha ricevuto l'argomento di parole chiave '%s' non previsto" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "Impossibile passare da %(current)s a %(next)s in fase di aggiornamento " "(richiesto from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "le proprietà personalizzate (%(props)s) sono in conflitto con le proprietà " "di base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Su questa piattaforma non sono disponibili hub 'poll' e 'selects' eventlog" msgid "limit param must be an integer" msgstr "parametro limite deve essere un numero intero" msgid "limit param must be positive" msgstr "parametro limite deve essere positivo" msgid "md5 hash of image contents." msgstr "hash md5 del contenuto dell'immagine. " #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() ha ricevuto parole chiave %s non previste" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "impossibile avviare %(serv)s. Si è verificato l'errore: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id è troppo lungo, dimensione max %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/ja/0000775000175000017500000000000000000000000016140 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000017725 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/ja/LC_MESSAGES/glance.po0000664000175000017500000020075200000000000021524 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Tomoyuki KATO , 2013 # Andreas Jaeger , 2016. #zanata # Shu Muto , 2018. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-09 10:00+0000\n" "Last-Translator: Shu Muto \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "最後の RPC 呼び出しで %(cls)s 例外が発生しました: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "イメージ %(i_id)s のメンバーリストで %(m_id)s が見つかりません。" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) が実行中..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s は既に実行されている可能性があります: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_type)s の %(task_id)s が正しく設定されていません。ファイルシステムスト" "アをロードできませんでした。" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_type)s の %(task_id)s が適切に設定されていません。作業ディレクトリー " "%(work_dir)s がありません" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(serv)s の %(verb)s 中" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(conf)s を使用して %(serv)s を %(verb)s 中" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s host:port のペアを指定してください。host は IPv4 アドレス、IPv6 アドレス、" "ホスト名、または FQDN です。IPv6 アドレスを使用する場合は、アドレスを大括弧で" "囲んでポートと区別してください (例えば、\"[fe80::a:b:c]:9876\")。" #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s に 4 バイトの Unicode 文字が含まれていてはなりません。" #, python-format msgid "%s is already stopped" msgstr "%s は既に停止しています" #, python-format msgid "%s is stopped" msgstr "%s は停止しています" msgid "'node_staging_uri' is not set correctly. Could not load staging store." msgstr "" "'node_staging_uri' が正しく設定されていません。ステージングストアをロードでき" "ませんでした。" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "keystone 認証戦略が有効な場合は、--os_auth_url オプションまたはOS_AUTH_URL 環" "境変数が必要です\n" msgid "A body is not expected with this request." msgstr "この要求では本文は予期されません。" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(object_name)s のメタデータ定義オブジェクトは、namespace=" "%(namespace_name)s に既に存在します。" #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s のメタデータ定義プロパティーは、namespace=" "%(namespace_name)s に既に存在します。" #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "name=%(resource_type_name)s のメタデータ定義リソースタイプは、既に存在しま" "す。" #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s. (Please note that metadata tag names are case " "insensitive)." 
msgstr "" "name=%(name)s のメタデータタグは既に namespace=%(namespace_name)s に存在しま" "す。(メタデータのタグ名は大文字小文字を区別しないことに注意してください。)" msgid "A set of URLs to access the image file kept in external store" msgstr "" "外部ストアに保持されているイメージファイルにアクセスするための一連の URL" msgid "Amount of disk space (in GB) required to boot image." msgstr "イメージのブートに必要なディスクスペースの量 (GB)" msgid "Amount of ram (in MB) required to boot image." msgstr "イメージのブートに必要な RAM の量 (MB)" msgid "An identifier for the image" msgstr "イメージの ID" msgid "An identifier for the image member (tenantId)" msgstr "イメージメンバーの ID (テナント ID)" msgid "An identifier for the owner of this task" msgstr "このタスクの所有者 ID" msgid "An identifier for the task" msgstr "タスクの ID" msgid "An image file url" msgstr "イメージファイルの URL" msgid "An image schema url" msgstr "イメージスキーマの URL" msgid "An image self url" msgstr "イメージ自体の URL" msgid "An import task exception occurred" msgstr "インポートタスクの例外が発生しました" msgid "An object with the same identifier already exists." msgstr "同じ ID のオブジェクトが既に存在します。" msgid "An object with the same identifier is currently being operated on." msgstr "現在、同じ ID を持つオブジェクトが操作されています。" msgid "An object with the specified identifier was not found." msgstr "指定された ID を持つオブジェクトが見つかりませんでした。" msgid "An unknown exception occurred" msgstr "不明な例外が発生しました" msgid "An unknown task exception occurred" msgstr "不明なタスク例外が発生しました" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "属性 '%(property)s' は読み取り専用です。" #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "属性 '%(property)s' は予約されています。" #, python-format msgid "Attribute '%s' is read-only." msgstr "属性 '%s' は読み取り専用です。" #, python-format msgid "Attribute '%s' is reserved." msgstr "属性 '%s' は予約されています。" msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "キューに入れられたイメージについてのみ属性 container_format を置換できます。" msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "キューに入れられたイメージについてのみ属性 disk_format を置換できます。" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "URL %(url)s の認証サービスが見つかりません。" #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "認証エラー - トークンがファイルアップロード中に失効した可能性があります。 %s " "へのイメージデータを削除します。" msgid "Authorization failed." msgstr "許可が失敗しました。" msgid "Available categories:" msgstr "使用可能カテゴリー:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "正しくない \"%s\" 照会フィルター形式。ISO 8601 DateTime 表記を使用してくださ" "い。" #, python-format msgid "Bad header: %(header_name)s" msgstr "ヘッダーが正しくありません: %(header_name)s" msgid "Body expected in request." msgstr "要求の本体が必要です。" msgid "Cannot be a negative value" msgstr "負の値にすることはできません" msgid "Cannot be a negative value." msgstr "負の値にすることはできません。" #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "イメージ %(key)s '%(value)s' を整数に変換できません。" msgid "Cannot remove last location in the image." msgstr "イメージ内の最後の場所は削除できません。" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "イメージ %(image_id)s のデータを保存できません: %(error)s" msgid "Cannot set locations to empty list." msgstr "空のリストに場所を設定することはできません。" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "チェックサムの検証に失敗しました。イメージ '%s' のキャッシュを打ち切りまし" "た。" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." 
msgstr "接続エラー/URL %(url)s の認証サービスに対する正しくない要求。" #, python-format msgid "Constructed URL: %s" msgstr "URL を構成しました: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "イメージ %(image_id)s のイメージダウンロードが壊れています" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "30 秒間の試行後に %(host)s:%(port)s にバインドできませんでした" msgid "Could not find OVF file in OVA archive file." msgstr "OVA アーカイブファイル内に OVF ファイルが見つかりませんでした。" #, python-format msgid "Could not find metadata object %s" msgstr "メタデータオブジェクト %s が見つかりませんでした" #, python-format msgid "Could not find metadata tag %s" msgstr "メタデータタグ %s が見つかりませんでした" #, python-format msgid "Could not find property %s" msgstr "プロパティー %s が見つかりませんでした" #, python-format msgid "Could not find task %s" msgstr "タスク %s が見つかりませんでした" #, python-format msgid "Could not update image: %s" msgstr "イメージを更新できませんでした: %s" #, python-format msgid "Couldn't create metadata namespace: %s" msgstr "メタデータ名前空間 %s を作成できませんでした" #, python-format msgid "Couldn't create metadata object: %s" msgstr "メタデータオブジェクト %s を作成できませんでした" #, python-format msgid "Couldn't create metadata property: %s" msgstr "メタデータプロパティ %s を作成できませんでした" #, python-format msgid "Couldn't create metadata tag: %s" msgstr "メタデータタグ %s を作成できませんでした" #, python-format msgid "Couldn't update metadata namespace: %s" msgstr "メタデータ名前空間 %s を更新できませんでした" #, python-format msgid "Couldn't update metadata object: %s" msgstr "メタデータオブジェクト %s を更新できませんでした" #, python-format msgid "Couldn't update metadata property: %s" msgstr "メタデータプロパティ %s を更新できませんでした" #, python-format msgid "Couldn't update metadata tag: %s" msgstr "メタデータタグ %s を更新できませんでした" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "現在、複数のディスクを含む OVA パッケージはサポートされません。" msgid "Custom property should not be greater than 255 characters." msgstr "カスタムプロパティは 255 文字より大きくてはいけません。" msgid "Data supplied was not valid." msgstr "指定されたデータが無効でした。" msgid "" "Database contraction did not run. Database contraction cannot be run before " "data migration is complete. Run data migration using \"glance-manage db " "migrate\"." msgstr "" "データベースの締結が実行されていません。データベースの締結はデータの移行が完" "了する前には実行できません。まず、\"glance-manage db migrate\" を使用してデー" "タの移行を実行してください。" msgid "" "Database contraction did not run. Database contraction cannot be run before " "database expansion. Run database expansion first using \"glance-manage db " "expand\"" msgstr "" "データベースの締結が実行されていません。データベースの締結はデータベースの展" "開の前には実行できません。まず、\"glance-manage db expand\" を使用してデータ" "ベースの展開を実行してください。" msgid "" "Database contraction failed. Couldn't find head revision of contract branch." msgstr "" "データベースの締結に失敗しました。締結ブランチのヘッドリビジョンが見つかりま" "せん。" msgid "" "Database expansion failed. Couldn't find head revision of expand branch." msgstr "" "データベースの展開に失敗しました。展開ブランチのヘッドリビジョンが見つかりま" "せん。" #, python-format msgid "" "Database expansion failed. Database expansion should have brought the " "database version up to \"%(e_rev)s\" revision. But, current revisions are: " "%(curr_revs)s " msgstr "" "データベースの展開に失敗しました。データベースの展開によって、データベースの" "バージョンがリビジョン \"%(e_rev)s\" になっているはずです。 しかし、現在のリ" "ビジョンは %(curr_revs)s です。" msgid "" "Database is either not under migration control or under legacy migration " "control, please run \"glance-manage db sync\" to place the database under " "alembic migration control." msgstr "" "データベースは移行制御下にも従来の移行制御下にもないため、\"glance-manage db " "sync\" を実行して移行制御下に置いてください。" msgid "Database is synced successfully." msgstr "データベースの同期に成功しました。" msgid "Database is up to date. 
No migrations needed." msgstr "データベースは最新です。移行は必要ありません。" msgid "Database is up to date. No upgrades needed." msgstr "データベースは最新です。アップグレードは必要ありません。" msgid "Date and time of image member creation" msgstr "イメージメンバーの作成日時" msgid "Date and time of image registration" msgstr "イメージ登録日時" msgid "Date and time of last modification of image member" msgstr "イメージメンバーの最終変更日時" msgid "Date and time of namespace creation" msgstr "名前空間の作成日時" msgid "Date and time of object creation" msgstr "オブジェクトの作成日時" msgid "Date and time of resource type association" msgstr "リソースタイプ関連付けの日時" msgid "Date and time of tag creation" msgstr "タグの作成日時" msgid "Date and time of the last image modification" msgstr "イメージの最終変更日時" msgid "Date and time of the last namespace modification" msgstr "名前空間の最終変更日時" msgid "Date and time of the last object modification" msgstr "オブジェクトの最終変更日時" msgid "Date and time of the last resource type association modification" msgstr "リソースタイプ関連付けの最終変更日時" msgid "Date and time of the last tag modification" msgstr "タグの最終変更日時" msgid "Datetime when this resource was created" msgstr "このリソースが作成された日時" msgid "Datetime when this resource was updated" msgstr "このリソースが更新された日時" msgid "Datetime when this resource would be subject to removal" msgstr "このリソースが削除される日時" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "イメージをアップロードしようとしましたが、割り当て量を超えてしまうため、拒否" "されています: %s" msgid "Descriptive name for the image" msgstr "イメージの記述名" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "ドライバー %(driver_name)s を正しく設定できませんでした。理由: %(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "要求のデコードのエラー。URL または要求本文に Glance でデコードできない文字が" "含まれていました。" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "イメージ %(image_id)s のメンバーの取得中のエラー: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "ストア設定にエラーがあります。ストアへのイメージの追加が無効になっています。" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "次の形式でメンバーを予期: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "次の形式で状態を予期: {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "削除するイメージ %(image_id)s が見つかりませんでした" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "削除するリソースタイプ %(resourcetype)s が見つかりませんでした" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "イメージキャッシュデータベースを初期化できませんでした。受け取ったエラー: %s" #, python-format msgid "Failed to read %s from config" msgstr "設定から %s を読み取ることができませんでした" #, python-format msgid "Failed to sync database: ERROR: %s" msgstr "データベースの同期に失敗しました: エラー: %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "HTTP エラーが発生したため、イメージ %(image_id)s のイメージデータのアップロー" "ドに失敗しました: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "内部エラーが発生したため、イメージ %(image_id)s のイメージデータをアップロー" "ドできませんでした: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "ファイル %(path)s に無効なバッキングファイル %(bfile)s があります。打ち切りま" "す。" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." 
msgstr "" "ファイルベースのインポートは許可されません。イメージデータの非ローカルソース" "を使用してください。" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "要求は禁止されています。メタデータ定義 namespace=%s を表示できません" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "要求を禁止しています。タスク %s は表示されません" msgid "Format of the container" msgstr "コンテナーの形式" msgid "Format of the disk" msgstr "ディスクの形式" #, python-format msgid "Host \"%s\" is not valid." msgstr "ホスト \"%s\" が無効です。" #, python-format msgid "Host and port \"%s\" is not valid." msgstr "ホストおよびポート \"%s\" が無効です。" msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "適切な場合 (通常は障害発生時) にのみ、人間が読み取れる情報メッセージが含まれ" "ます" msgid "If true, image will not be deletable." msgstr "true の場合、イメージは削除可能になりません。" msgid "If true, namespace will not be deletable." msgstr "true の場合、名前空間は削除可能になりません。" #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "イメージ %(id)s は使用中のため削除できませんでした: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "アップロード後にイメージ %(image_id)s が見つかりませんでした。このイメージは" "アップロード中に削除された可能性があります: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "イメージ %(image_id)s は保護されているため、削除できません。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "アップロード後にイメージ %s が見つかりませんでした。イメージはアップロード中" "に削除された可能性があります。アップロードされたチャンクをクリーンアップ中で" "す。" #, python-format msgid "Image %s not found." msgstr "イメージ %s が見つかりません。" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "イメージがストレージクォータを超えています: %s" msgid "Image id is required." msgstr "イメージ ID が必要です。" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "イメージ %(id)s のメンバー数がイメージメンバー上限を超えました: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s から %(new_status)s へのイメージのステータス移行は許可されませ" "ん" #, python-format msgid "Image storage media is full: %s" msgstr "イメージストレージのメディアがフルです: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "イメージ %(id)s のイメージタグ上限を超えました: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "イメージのアップロード問題: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "指定された ID %(image_id)s を持つイメージが見つかりませんでした" msgid "Import request requires a 'method' field." msgstr "インポートリクエストは、'method' フィールドが必要です。" msgid "Import request requires a 'name' field." msgstr "インポートリクエストは、'name' フィールドが必要です。" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "認証ストラテジーが誤っています。\"%(expected)s\" が必要ですが、\"%(received)s" "\" を受け取りました" #, python-format msgid "Incorrect request: %s" msgstr "正しくない要求: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "入力に '%(key)s' フィールドが含まれていません" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "イメージストレージのメディアに対する許可が不十分です: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "このリソースの JSON ポインターは無効です: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 設定ファイルの設定が無効です。" msgid "Invalid configuration in property protection file." 
msgstr "プロパティー保護ファイルで設定が無効です。" #, python-format msgid "Invalid content type %(content_type)s" msgstr "コンテンツタイプ %(content_type)s が無効です" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "無効なフィルター値 %s。引用符が組みになっていません。" #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "無効なフィルター値 %s。終了引用符の後にコンマがありません。" #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "無効なフィルター値 %s。開始引用符の前にコンマがありません。" #, python-format msgid "Invalid int value for max_rows: %(max_rows)s" msgstr "max_rows の整数値 %(max_rows)s は無効です。" msgid "Invalid location" msgstr "無効な場所" #, python-format msgid "Invalid location: %s" msgstr "無効な場所: %s" msgid "Invalid locations" msgstr "無効な場所" #, python-format msgid "Invalid locations: %s" msgstr "無効な場所: %s" msgid "Invalid marker format" msgstr "マーカーフォーマットが無効です" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "無効な操作: `%(op)s`。以下のいずれかでなければなりません: %(available)s。" msgid "Invalid position for adding a location." msgstr "場所の追加位置が無効です。" msgid "Invalid position for removing a location." msgstr "場所の削除位置が無効です。" msgid "Invalid service catalog json." msgstr "無効なサービスカタログ JSON ファイル。" #, python-format msgid "Invalid sort direction: %s" msgstr "無効なソート方向: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "ソートキー %(sort_key)s は無効です。 %(available)s のいずれかでなければなりま" "せん。" #, python-format msgid "Invalid status value: %s" msgstr "状態の値が無効です: %s" #, python-format msgid "Invalid status: %s" msgstr "無効な状況: %s" #, python-format msgid "Invalid type value: %s" msgstr "タイプ値が無効です: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "無効な更新です。結果として、同じ名前 %s でメタデータ定義名前空間が重複しま" "す。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無効な更新です。結果として、同じ name=%(name)s で、namespace=" "%(namespace_name)s でメタデータ定義オブジェクトが重複します。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無効な更新です。結果として、同じ name=%(name)s で、namespace=" "%(namespace_name)s でメタデータ定義オブジェクトが重複します。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無効な更新です。結果として、同じ name=%(name)s で、namespace=" "%(namespace_name)s でメタデータ定義プロパティーが重複します。" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "パラメーター '%(param)s' の値 '%(value)s' が無効です: %(extra_msg)s" #, python-format msgid "" "Invalid value '%s' for 'protected' filter. Valid values are 'true' or " "'false'." msgstr "" "'protected' フィルターの値 '%s' が無効です。許容される値は 'true' または " "'false' です。" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "オプション %(option)s の値が無効です: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "無効な可視性の値: %s" #, python-format msgid "It's not allowed to add locations if image status is %s." msgstr "イメージの状態が %s の場合、場所を追加できません。" msgid "It's not allowed to add locations if locations are invisible." 
msgstr "場所が表示されない場合、場所を追加できません。" #, python-format msgid "It's not allowed to remove locations if image status is %s." msgstr "イメージの状態が %s の場合、場所を削除できません。" msgid "It's not allowed to remove locations if locations are invisible." msgstr "場所が表示されない場合、場所を削除できません。" #, python-format msgid "It's not allowed to replace locations if image status is %s." msgstr "イメージの状態が %s の場合、場所を変更できません。" msgid "It's not allowed to update locations if locations are invisible." msgstr "場所が表示されない場合、場所を更新できません。" msgid "List of strings related to the image" msgstr "イメージに関連する文字列のリスト" msgid "Malformed JSON in request body." msgstr "要求本体の JSON の形式が誤りです。" msgid "Maximal age is count of days since epoch." msgstr "最長存続時間は、エポック以降の日数です。" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "最大リダイレクト数 (%(redirects)s) を超えました。" #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "イメージ %(image_id)s のメンバー %(member_id)s が重複しています" msgid "Member can't be empty" msgstr "「メンバー」は空にできません" msgid "Member to be added not specified" msgstr "追加するメンバーが指定されていません" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "メタデータ定義名前空間 %(namespace)s は保護されており、削除できません。" #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "id=%s のメタデータ定義名前空間が見つかりません" #, python-format msgid "Metadata definition namespace=%(namespace_name)s was not found." msgstr "" "namespace=%(namespace_name)s のメタデータ定義名前空間が見つかりませんでした。" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "メタデータ定義オブジェクト %(object_name)s は保護されており、削除できません。" #, python-format msgid "Metadata definition object not found for id=%s" msgstr "id=%s のメタデータ定義オブジェクトが見つかりません" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "メタデータ定義プロパティー %(property_name)s は保護されており、削除できませ" "ん。" #, python-format msgid "Metadata definition property not found for id=%s" msgstr "id=%s のメタデータ定義プロパティーが見つかりません" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "メタデータ定義リソースタイプ %(resource_type_name)s はシードシステムタイプで" "あり、削除できません。" #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "メタデータ定義リソースタイプ関連付け %(resource_type)s は保護されており、削除" "できません。" #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "メタデータ定義タグ %(tag_name)s は保護されており、削除できません。" #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "id=%s のメタデータ定義タグが見つかりません" #, python-format msgid "Missing required credential: %(required)s" msgstr "必須の資格情報がありません: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "領域 %(region)s に対して複数の「イメージ」サービスが一致します。これは一般" "に、領域が必要であるのに、領域を指定していないことを意味します。" msgid "Must supply a non-negative value for age." msgstr "存続期間には負ではない値を指定してください。" #, python-format msgid "No image found with ID %s" msgstr "ID が %s であるイメージは見つかりません" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "イメージ %(img)s 内で ID が %(loc)s の場所は見つかりません" #, python-format msgid "Not allowed to create members for image %s." 
msgstr "イメージ %s のメンバーの作成は許可されていません。" #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "状況が「%s」であるイメージの非アクティブ化は許可されていません" #, python-format msgid "Not allowed to delete members for image %s." msgstr "イメージ %s のメンバーの削除は許可されていません。" #, python-format msgid "Not allowed to delete tags for image %s." msgstr "イメージ %s のタグの削除は許可されていません。" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "状況が「%s」であるイメージの再アクティブ化は許可されていません" #, python-format msgid "Not allowed to update members for image %s." msgstr "イメージ %s のメンバーの更新は許可されていません。" #, python-format msgid "Not allowed to update tags for image %s." msgstr "イメージ %s のタグの更新は許可されていません。" #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "イメージ %(image_id)s ではイメージデータのアップロードは許可されません: " "%(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "ソート方向の数がソートキーの数に一致しません" msgid "OVA extract is limited to admin" msgstr "OVA 抽出が実行できるのは管理者のみです" msgid "Old and new sorting syntax cannot be combined" msgstr "新旧のソート構文を結合することはできません" msgid "Only shared images have members." msgstr "共有イメージのみがメンバーを持ちます。" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "操作 \"%s\" には \"value\" という名前のメンバーが必要です。" msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "操作オブジェクトには、\"add\"、\"remove\"、または \"replace\" という名前のメ" "ンバーを正確に 1 つだけ含める必要があります。" msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "操作オブジェクトには、\"add\"、\"remove\"、または \"replace\" という名前のメ" "ンバーを 1 つしか含められません。" msgid "Operations must be JSON objects." msgstr "操作は JSON オブジェクトでなければなりません。" #, python-format msgid "Original locations is not empty: %s" msgstr "元の場所は空ではありません: %s" msgid "Owner can't be updated by non admin." msgstr "管理者以外は所有者を更新できません。" msgid "Owner of the image" msgstr "イメージの所有者" msgid "Owner of the namespace." msgstr "名前空間の所有者。" msgid "Param values can't contain 4 byte unicode." msgstr "Param 値に 4 バイトの Unicode が含まれていてはなりません。" msgid "Placed database under migration control at revision:" msgstr "移行制御下にある配置されたデータベースはリビジョン:" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "ポインター `%s` に、認識されているエスケープシーケンスの一部ではない \"~\" が" "含まれています。" #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "ポインター `%s` に隣接する \"/\" が含まれています。" #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "ポインター `%s` に有効なトークンが含まれていません。" #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "ポインター `%s` の先頭が \"/\" ではありません。" #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "ポインター `%s` の末尾が \"/\" です。" #, python-format msgid "Port \"%s\" is not valid." msgstr "ポート \"%s\" が無効です。" #, python-format msgid "Process %d not running" msgstr "プロセス %d は実行されていません" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "データの保存前にプロパティー %s を設定する必要があります。" #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "プロパティー %(property_name)s の先頭が、想定されるリソースタイプ関連付けのプ" "レフィックス \"%(prefix)s\" ではありません。" #, python-format msgid "Property %s already present." msgstr "プロパティー %s は既に存在しています。" #, python-format msgid "Property %s does not exist." msgstr "プロパティー %s は存在しません。" #, python-format msgid "Property %s may not be removed." 
msgstr "プロパティー %s は削除できません。" #, python-format msgid "Property %s must be set prior to saving data." msgstr "データの保存前にプロパティー %s を設定する必要があります。" msgid "Property names can't contain 4 byte unicode." msgstr "プロパティー名に 4 バイトの Unicode が含まれていてはなりません。" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "" "指定されたオブジェクトがスキーマ '%(schema)s' と一致しません: %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "指定されたタスク状況はサポートされていません: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "指定されたタスクタイプはサポートされていません: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "分かりやすい名前空間の説明が提供されます。" msgid "Purge command failed, check glance-manage logs for more details." msgstr "" "Purge コマンドが失敗しました。詳細は glance-manage のログを確認して下さい。" msgid "Received invalid HTTP redirect." msgstr "無効な HTTP リダイレクトを受け取りました。" #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "許可のために %(uri)s にリダイレクトしています。" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "レジストリーが API サーバーで正しく設定されていませんでした。理由: %(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "%(serv)s の再ロードはサポートされていません" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (pid %(pid)s) をシグナル (%(sig)s) により再ロード中" #, python-format msgid "Removing stale pid file %s" msgstr "失効した pid ファイル %s を削除中" msgid "Request body must be a JSON array of operation objects." msgstr "要求本文は、操作オブジェクトの JSON 配列でなければなりません。" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone からの応答に Glance エンドポイントが含まれていません。" msgid "Rolling upgrades are currently supported only for MySQL and Sqlite" msgstr "" "ローリングアップグレードは現在 MySQL と Sqlite のみがサポートされています。" msgid "Scope of image accessibility" msgstr "イメージのアクセス可能性の範囲" msgid "Scope of namespace accessibility." msgstr "名前空間アクセシビリティーの範囲。" msgid "Scrubber encountered an error while trying to fetch scrub jobs." msgstr "スクラブジョブの取得を試行中にエラーが発生しました。" #, python-format msgid "Server %(serv)s is stopped" msgstr "サーバー %(serv)s は停止しています" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "サーバーワーカーの作成に失敗しました: %(reason)s" msgid "Signature verification failed" msgstr "シグニチャーの検証が失敗しました" msgid "Size of image file in bytes" msgstr "イメージファイルのサイズ (バイト)" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "一部のリソースタイプでは、インスタンスごとに複数のキー/値のペアが許可されてい" "ます。例えば、Cinder はボリューム上のユーザーおよびイメージメタデータを許可し" "ています。イメージプロパティーメタデータのみ、Nova (スケジュールまたはドライ" "バー) によって評価されます。このプロパティーによって、名前空間ターゲットから" "あいまいさを排除できます。" msgid "Sort direction supplied was not valid." msgstr "指定されたソート方向が無効でした。" msgid "Sort key supplied was not valid." msgstr "指定されたソートキーが無効でした。" msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "指定されたリソースタイプに使用するプレフィックスを指定します。名前空間にある" "プロパティーはすべて、指定されたリソースタイプに適用されるときに、このプレ" "フィックスが先頭に付けられます。コロン (:) などのプレフィックス区切り文字を組" "み込む必要があります。" msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." 
msgstr "状況は、\"保留中\"、\"受諾\"、または\"拒否\" でなければなりません。" msgid "Status not specified" msgstr "状況が指定されていません" msgid "Status of the image" msgstr "イメージの状態" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "%(cur_status)s から %(new_status)s への状況遷移は許可されません" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (pid %(pid)s) をシグナル (%(sig)s) により停止中" msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' イメージ属性に対してサポートされる値" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' イメージ属性に対してサポートされる値" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "%(serv)s として抑制された再作成は %(rsn)s でした。" msgid "System SIGHUP signal received." msgstr "システム SIGHUP シグナルを受信しました。" #, python-format msgid "Task '%s' is required" msgstr "タスク '%s' が必要です" msgid "Task does not exist" msgstr "タスクが存在しません" msgid "Task failed due to Internal Error" msgstr "内部エラーが原因でタスクが失敗しました" msgid "Task was not configured properly" msgstr "タスクが正しく設定されませんでした" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "指定された id %(task_id)s のタスクは見つかりませんでした" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "\"changes-since\" フィルターは v2 上で使用できなくなりました。" #, python-format msgid "The CA file you specified %s does not exist" msgstr "指定した CA ファイル %s は存在しません" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "このタスク %(task_id)s で作成されているイメージ %(image_id)s オブジェクトは以" "降の処理に有効な状況ではなくなりました。" msgid "The Store URI was malformed." msgstr "ストア URI の形式に誤りがありました。" #, python-format msgid "The cert file you specified %s does not exist" msgstr "指定した証明書ファイル %s は存在しません" msgid "The current status of this task" msgstr "このタスクの現行状況" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "イメージキャッシュディレクトリー %(image_cache_dir)s が格納されているデバイス" "では xattr はサポートされません。fstab を編集して、キャッシュディレクトリーが" "格納されているデバイスの該当する行に user_xattr オプションを追加しなければな" "らない可能性があります。" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "指定した URI が無効です。次のサポートされている URI のリストから、有効な URI " "を指定してください: %(supported)s" #, python-format msgid "The image %s has data on staging" msgstr "イメージ %s はステージングにデータがあります" #, python-format msgid "" "The image %s is already present on the target, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the target server." msgstr "" "イメージ %s は既にターゲット上にありますが、検査では見つかりませんでした。こ" "れは、ターゲットサーバー上のすべてのイメージを表示する許可を持っていないこと" "を示します。" #, python-format msgid "The incoming image is too large: %s" msgstr "入力イメージが大きすぎます: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "指定した鍵ファイル %s は存在しません" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "許可されるイメージの場所の数の制限を超えました。試行: %(attempted)s、最大: " "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. 
Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "このイメージに対して許可されるイメージメンバー数の制限を超えました。試行: " "%(attempted)s、最大: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "許可されるイメージプロパティー数の制限を超えました。試行: %(attempted)s、最" "大: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "許可されるイメージタグ数の制限を超えました。試行: %(attempted)s、最大: " "%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "場所 %(location)s は既に存在します" #, python-format msgid "The location data has an invalid ID: %d" msgstr "場所データの ID が無効です: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "name=%(record_name)s のメタデータ定義 %(record_type)s は削除されていません。" "他のレコードがまだこのメタデータ定義を参照しています。" #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "メタデータ定義 namespace=%(namespace_name)s は既に存在します。" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "name=%(object_name)s のメタデータ定義オブジェクトが、namespace=" "%(namespace_name)s に見つかりませんでした。" #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s のメタデータ定義プロパティーは、namespace=" "%(namespace_name)s に見つかりませんでした。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "resource-type=%(resource_type_name)s の、namespace=%(namespace_name)s へのメ" "タデータ定義リソースタイプ関連付けは、既に存在します。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "resource-type=%(resource_type_name)s の、namespace=%(namespace_name)s へのメ" "タデータ定義リソースタイプ関連付けが見つかりませんでした。" #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "name=%(resource_type_name)s のメタデータ定義リソースタイプが見つかりませんで" "した。" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "name=%(name)s のメタデータ定義タグが namespace=%(namespace_name)s に見つかり" "ませんでした。" msgid "The parameters required by task, JSON blob" msgstr "タスクによって要求されるパラメーター、JSON blob" msgid "The provided image is too large." msgstr "指定されたイメージが大きすぎます。" msgid "The request returned 500 Internal Server Error." msgstr "要求で「500 Internal Server Error」が返されました。" msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "要求で「503 Service Unavailable」が返されました。これは一般に、サービスの過負" "荷または他の一時的な障害時に起こります。" #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "要求が「302 Multiple Choices」を返しました。これは通常、要求 URI にバージョン" "標識を含めなかったことを意味します。\n" "\n" "返された応答の本体:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. 
This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求で「413 Request Entity Too Large」が返されました。これは一般に、速度制限" "または割り当て量のしきい値に違反したことを意味します。\n" "\n" "応答本体:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求で予期しない状況が返されました: %(status)s。\n" "\n" "応答本体:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "要求されたイメージは非アクティブ化されています。イメージデータのダウンロード" "は禁止されています。" msgid "The result of current task, JSON blob" msgstr "現行タスクの結果、JSON blob" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "データのサイズ %(image_size)s が制限を超えます。%(remaining)s バイト残されて" "います。" #, python-format msgid "The specified member %s could not be found" msgstr "指定されたメンバー %s は見つかりませんでした" #, python-format msgid "The specified metadata object %s could not be found" msgstr "指定されたメタデータオブジェクト %s は見つかりませんでした" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "指定されたメタデータタグ %s が見つかりませんでした" #, python-format msgid "The specified namespace %s could not be found" msgstr "指定された名前空間 %s は見つかりませんでした" #, python-format msgid "The specified property %s could not be found" msgstr "指定されたプロパティー %s は見つかりませんでした" #, python-format msgid "The specified resource type %s could not be found " msgstr "指定されたリソースタイプ %s は見つかりませんでした" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "削除されたイメージの場所の状況は「pending_delete」または「deleted」にのみ設定" "できます" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "削除されたイメージの場所の状況は「pending_delete」または「deleted」にのみ設定" "できます。" msgid "The status of this image member" msgstr "このイメージメンバーの状況" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "ターゲットメンバー %(member_id)s はイメージ %(image_id)s に既に関連付けられて" "います。" msgid "The type of task represented by this content" msgstr "このコンテンツによって表されるタスクのタイプ" msgid "The unique namespace text." msgstr "固有の名前空間テキスト。" msgid "The user friendly name for the namespace. Used by UI if available." msgstr "名前空間の分かりやすい名前。存在する場合は、UI によって使用されます。" msgid "There was an error configuring the client." msgstr "クライアントの設定中にエラーが発生しました。" msgid "There was an error connecting to a server" msgstr "サーバーへの接続中にエラーが発生しました" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "この操作は、Glance タスクでは現在許可されていません。これらのタスクは、" "expires_at プロパティーに基づき、時間に達すると自動的に削除されます。" msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "成功または失敗の後でタスクが存続する時間 (時)" msgid "Too few arguments." msgstr "引数が少なすぎます。" #, python-format msgid "" "Total size is %(size)d bytes (%(human_size)s) across %(img_count)d images" msgstr "" "合計サイズは、%(img_count)d) 個のイメージで %(size)d バイト (%(human_size)s) " "です" msgid "URL to access the image file kept in external store" msgstr "外部ストアに保持されているイメージファイルにアクセスするための URL" #, python-format msgid "" "Unable to create pid file %(pid)s. 
Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "pid ファイル %(pid)s を作成できません。非ルートとして実行しますか?\n" "一時ファイルにフォールバック中。次を使用して %(service)s サービスを\n" "停止できます: %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "不明な演算子 '%s' によってフィルター処理を行うことができません。" msgid "Unable to filter on a range with a non-numeric value." msgstr "非数値を含む範囲ではフィルタリングできません。" msgid "Unable to filter on a unknown operator." msgstr "不明な演算子に対してフィルター処理を行うことができません。" msgid "Unable to filter using the specified operator." msgstr "指定された演算子を使用してフィルター処理ができません。" msgid "Unable to filter using the specified range." msgstr "指定された範囲ではフィルタリングできません。" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "JSON スキーマの変更で '%s' が見つかりません" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "JSON スキーマの変更で `op` が見つかりません。以下のいずれかでなければなりませ" "ん: %(available)s。" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "ファイル記述子制限を増加できません。非ルートとして実行しますか?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "設定ファイル %(conf_file)s から %(app_name)s をロードできません。\n" "受け取ったエラー: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "スキーマをロードできません: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "%s の paste 設定ファイルが見つかりません。" msgid "Unexpected body type. Expected list/dict." msgstr "予期しない本文タイプ。予期されたのはリストまたは辞書です。" #, python-format msgid "Unexpected response: %s" msgstr "予期しない応答: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "不明な認証ストラテジー '%s'" #, python-format msgid "Unknown command: %s" msgstr "不明なコマンド: %s" #, python-format msgid "Unknown import method name '%s'." msgstr "不明なインポートメソッド名 '%s' です。" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" msgid "Unrecognized JSON Schema draft version" msgstr "認識されない JSON スキーマのドラフトバージョン" #, python-format msgid "Upgraded database to: %(v)s, current revision(s): %(r)s" msgstr "" "データベースが %(v)s にアップグレードされました。現在のリビジョン: %(r)s" msgid "Upgraded database, current revision(s):" msgstr "データベースがアップグレードされました。現在のリビジョン:" msgid "Virtual size of image in bytes" msgstr "イメージの仮想サイズ (バイト)" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "pid %(pid)s (%(file)s) が終了するのを 15 秒間待ちました。中断します" msgid "You are not authenticated." msgstr "認証されていません。" #, python-format msgid "You are not authorized to complete %(action)s action." msgstr "%(action)s アクションの実行を許可されていません。" msgid "You are not authorized to complete this action." msgstr "このアクションの実行を許可されていません。" #, python-format msgid "You are not authorized to lookup image %s." msgstr "イメージ %s を調べる権限がありません。" #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "イメージ %s のメンバーを調べる権限がありません。" msgid "You are not permitted to create image members for the image." msgstr "そのイメージのイメージメンバーの作成は許可されていません。" #, python-format msgid "You are not permitted to create images owned by '%s'."
msgstr "'%s' によって所有されているイメージの作成は許可されていません。" msgid "You do not own this image" msgstr "このイメージを所有していません" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "接続時に SSL を使用するよう選択し、証明書を指定しましたが、key_file パラメー" "ターを指定しなかったか、GLANCE_CLIENT_KEY_FILE 環境変数を設定しませんでした" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "接続時に SSL を使用するよう選択し、鍵を指定しましたが、cert_file パラメーター" "を指定しなかったか、GLANCE_CLIENT_CERT_FILE 環境変数を設定しませんでした" msgid "" "Your database is not up to date. Your first step is to run `glance-manage db " "expand`." msgstr "" "データベースが最新ではありません。最初のステップは、`glance-manage db " "expand` です。" msgid "" "Your database is not up to date. Your next step is to run `glance-manage db " "contract`." msgstr "" "データベースが最新ではありません。次のステップは、`glance-manage db " "contract` です。" msgid "" "Your database is not up to date. Your next step is to run `glance-manage db " "migrate`." msgstr "" "データベースが最新ではありません。次のステップは、`glance-manage db migrate` " "です。" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() で予期しないキーワード引数 '%s' が得られました" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "更新で %(current)s から %(next)s に移行できません (from_state=%(from)s が必" "要)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "カスタムプロパティー (%(props)s) が基本プロパティーと競合しています" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "このプラットフォームでは eventlet の「poll」ハブも「selects」ハブも使用できま" "せん" msgid "limit param must be an integer" msgstr "limit パラメーターは整数でなければなりません" msgid "limit param must be positive" msgstr "limit パラメーターは正でなければなりません" msgid "md5 hash of image contents." msgstr "イメージコンテンツの MD5 ハッシュ。" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() で予期しないキーワード %s が得られました" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s を起動できません。受け取ったエラー: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id が長すぎます。最大サイズは %s です" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/ko_KR/0000775000175000017500000000000000000000000016553 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000020340 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/ko_KR/LC_MESSAGES/glance.po0000664000175000017500000015633500000000000022146 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # HyunWoo Jo , 2014 # Andreas Jaeger , 2016. #zanata # Jinseok Kim , 2021. 
#zanata # Sion Shin , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-08-26 06:18+0000\n" "Last-Translator: Sion Shin \n" "Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "%(cls)s 예외가 마지막 rpc 호출에서 발생: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "이미지 %(i_id)s의 멤버 목록에서 %(m_id)s을(를) 찾을 수 없습니다." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s(pid %(pid)s)이(가) 실행 중..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s이(가) 이미 실행 중으로 표시됨: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_type)s의 %(task_id)s가 제대로 구성되지 않았습니다. 파일 시스템 저장소" "를 로드할 수 없습니다." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_type)s의 %(task_id)s가 제대로 구성되지 않았습니다. 누락 작업 디렉토" "리: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(serv)s을(를) %(verb)s 중" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(serv)s에서 %(conf)s과(와) 함께 %(verb)s 중" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s 호스트:포트 쌍을 지정하십시오. 여기서 호스트는 IPv4 주소, IPv6 주소, 호스" "트 이름 또는 FQDN입니다. IPv6 주소를 사용하는 경우에는 포트와 분리하여 대괄호" "로 묶으십시오(예: \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s에는 4바이트 유니코드 문자를 포함할 수 없습니다." #, python-format msgid "%s is already stopped" msgstr "%s이(가) 이미 중지되었습니다." #, python-format msgid "%s is stopped" msgstr "%s이(가) 중지됨" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d." msgstr "'%(param)s' 값이 범위를 벗어났습니다. %(max)d를 초과하면 안 됩니다." msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "키스톤 인증 전략이 사용될 경우 --os_auth_url 옵션 또는 OS_AUTH_URL 환경 변수" "가 필요합니다.\n" msgid "A body is not expected with this request." msgstr "이 요청에는 본문이 없어야 합니다." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(object_name)s인 메타데이터 정의 오브젝트가 namespace=" "%(namespace_name)s에서 찾을 수 없습니다." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s인 메타데이터 정의 특성이 namespace=%(namespace_name)s" "에 이미 존재합니다." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "name=%(resource_type_name)s인 메타데이터 정의 자원 유형이 이미 존재합니다." msgid "A set of URLs to access the image file kept in external store" msgstr "외부 저장소에 보관된 이미지 파일에 액세스하기 위한 URL 세트" msgid "Amount of disk space (in GB) required to boot image." 
msgstr "이미지를 부팅하는 데 필요한 디스크 공간의 양(MB)" msgid "Amount of ram (in MB) required to boot image." msgstr "이미지를 부팅하는 데 필요한 RAM의 양(MB)" msgid "An identifier for the image" msgstr "이미지에 대한 ID" msgid "An identifier for the image member (tenantId)" msgstr "이미지 멤버에 대한 ID(tenantId)" msgid "An identifier for the owner of this task" msgstr "이 태스크 소유자의 ID" msgid "An identifier for the task" msgstr "태스크의 ID" msgid "An image file url" msgstr "이미지 파일 url" msgid "An image schema url" msgstr "이미지 스키마 url" msgid "An image self url" msgstr "이미지 자체 url" msgid "An import task exception occurred" msgstr "가져오기 작업 예외 발생" msgid "An object with the same identifier already exists." msgstr "동일한 ID를 갖는 오브젝트가 이미 존재합니다. " msgid "An object with the same identifier is currently being operated on." msgstr "동일한 ID가 있는 오브젝트가 현재 작동됩니다." msgid "An object with the specified identifier was not found." msgstr "지정된 ID를 갖는 오브젝트를 찾을 수 없습니다." msgid "An unknown exception occurred" msgstr "알 수 없는 예외가 발생했음" msgid "An unknown task exception occurred" msgstr "알 수 없는 태스크 예외 발생" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "'%(property)s' 속성은 읽기 전용입니다." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "'%(property)s' 속성은 예약되어 있습니다." #, python-format msgid "Attribute '%s' is read-only." msgstr "'%s' 속성은 읽기 전용입니다." #, python-format msgid "Attribute '%s' is reserved." msgstr "'%s' 속성은 예약되어 있습니다." msgid "Attribute container_format can be only replaced for a queued image." msgstr "큐에 있는 이미지에 대해 속성 container_format을 대체할 수 있습니다." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "큐에 있는 이미지에 대해 속성 disk_format을 대체할 수 있습니다." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "URL %(url)s의 Auth 서비스를 찾을 수 없습니다." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "인증 오류 - 파일 업로드 중에 토큰이 만료되었습니다. %s의 이미지 데이터를 삭제" "합니다." msgid "Authorization failed." msgstr "권한 부여에 실패했습니다." msgid "Available categories:" msgstr "사용 가능한 카테고리:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "잘못된 \"%s\" 쿼리 필터 형식입니다. ISO 8601 DateTime 표기법을 사용하십시오." #, python-format msgid "Bad header: %(header_name)s" msgstr "잘못된 헤더: %(header_name)s" msgid "Body expected in request." msgstr "요청에 본문이 있어야 합니다." msgid "Caching via API is not supported at this site." msgstr "이 사이트에서는 API를 통한 캐싱이 지원되지 않습니다." msgid "Cannot be a negative value" msgstr "음수 값일 수 없음" msgid "Cannot be a negative value." msgstr "음수 값이 될 수 없습니다." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "이미지 %(key)s '%(value)s'을(를) 정수로 변환할 수 없습니다." msgid "Cannot remove last location in the image." msgstr "이미지에서 마지막 위치를 제거할 수 없습니다." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "이미지 %(image_id)s 에 대한 데이터 저장 불가: %(error)s" msgid "Cannot set locations to empty list." msgstr "위치를 비어 있는 목록으로 설정할 수 없습니다." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "체크섬 검증에 실패했습니다. '%s' 이미지 캐시가 중단되었습니다." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "연결 오류/URL %(url)s에서 Auth 서비스에 대한 잘못된 요청입니다." 
#, python-format msgid "Constructed URL: %s" msgstr "URL을 구성함: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "%(image_id)s 이미지에 대한 손상된 이미지 다운로드" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "30초 동안 시도한 후 %(host)s:%(port)s에 바인드할 수 없음" msgid "Could not find OVF file in OVA archive file." msgstr "OVA 아카이브 파일에서 OVF를 찾을 수 없습니다." #, python-format msgid "Could not find metadata object %s" msgstr "메타데이터 오브젝트 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find metadata tag %s" msgstr "메타데이터 태그 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find property %s" msgstr "특성 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find task %s" msgstr "태스크 %s을(를) 찾을 수 없음" #, python-format msgid "Could not update image: %s" msgstr "이미지를 업데이트할 수 없음: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "여러 디스크를 포함하는 OVA 패키지는 현재 지원되지 않습니다." msgid "Data supplied was not valid." msgstr "제공된 데이터가 올바르지 않습니다." msgid "Database is synced successfully." msgstr "데이터베이스가 성공적으로 동기화됐습니다." msgid "Date and time of image member creation" msgstr "이미지 멤버 작성 날짜 및 시간" msgid "Date and time of image registration" msgstr "이미지 등록 날짜 및 시간" msgid "Date and time of last modification of image member" msgstr "이미지 멤버의 최종 수정 날짜 및 시간" msgid "Date and time of namespace creation" msgstr "네임스페이스 작성 날짜 및 시간" msgid "Date and time of object creation" msgstr "오브젝트 작성 날짜 및 시간" msgid "Date and time of resource type association" msgstr "자원 유형 연관 날짜 및 시간" msgid "Date and time of tag creation" msgstr "태그 작성 날짜 및 시간" msgid "Date and time of the last image modification" msgstr "최종 이미지 수정의 날짜 및 시간" msgid "Date and time of the last namespace modification" msgstr "최종 네임스페이스 수정의 날짜 및 시간" msgid "Date and time of the last object modification" msgstr "최종 오브젝트 수정의 날짜 및 시간" msgid "Date and time of the last resource type association modification" msgstr "최종 자원 유형 연관 수정의 날짜 및 시간" msgid "Date and time of the last tag modification" msgstr "최종 태그 수정 날짜 및 시간" msgid "Datetime when this resource was created" msgstr "이 자원이 작성된 Datetime" msgid "Datetime when this resource was updated" msgstr "이 자원이 업데이트된 Datetime" msgid "Datetime when this resource would be subject to removal" msgstr "이 자원이 제거되는 Datetime" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "할당량을 초과하기 때문에 이미지 업로드를 거부하는 중: %s" msgid "Descriptive name for the image" msgstr "이미지에 대한 설명식 이름" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "%(driver_name)s 드라이버가 올바르게 구성되지 않았습니다. 이유: %(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "요청을 디코딩하는 중에 오류가 발생했습니다. URL이나 요청 본문에 Glance에서 디" "코딩할 수 없는 문자가 포함되어 있습니다." #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "이미지 %(image_id)s의 멤버를 페치하는 중에 오류 발생: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "저장소 구성에 오류가 있습니다. 이미지를 저장소에 추가할 수 없습니다." 
msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "{\"member\": \"image_id\"} 형식의 멤버가 있어야 함" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "{\"status\": \"status\"} 형식의 상태가 있어야 함" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "삭제할 %(image_id)s 이미지를 찾는 데 실패함" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "삭제하기 위한 리소스 타입 %(resourcetype)s 검색 실패" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "이미지 캐시 데이터베이스를 초기화하지 못했습니다. 오류 발생: %s" #, python-format msgid "Failed to read %s from config" msgstr "구성에서 %s을(를) 읽지 못했음" #, python-format msgid "Failed to sync database: ERROR: %s" msgstr "데이터베이스 동기화를 실패했습니다. ERROR: %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "HTTP 오류로 인해 이미지 %(image_id)s의 이미지 데이터 업로드 실패: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "내부 오류로 인해 이미지 %(image_id)s의 이미지 데이터 업로드 실패: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "파일 %(path)s에 올바르지 않은 백업 파일 %(bfile)s이(가) 있어 중단합니다." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "파일 기반 가져오기는 허용되지 않습니다. 이미지 데이터의 로컬이 아닌 소스를 사" "용하십시오." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "요청이 금지되고 메타데이터 정의 namespace=%s이(가) 표시되지 않습니다." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "요청 금지. 태스크 %s이(가) 표시되지 않음" msgid "Format of the container" msgstr "컨테이너의 형식" msgid "Format of the disk" msgstr "디스크의 형식" #, python-format msgid "Host \"%s\" is not valid." msgstr "\"%s\" 호스트가 올바르지 않습니다." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "호스트 및 포트 \"%s\"이(가) 올바르지 않습니다." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "사용자가 읽을 수 있는 정보 메시지는 적절한 경우에만 포함됨 (일반적으로 실패 " "시)" msgid "If true, image will not be deletable." msgstr "true일 경우 이미지는 삭제 불가능합니다." msgid "If true, namespace will not be deletable." msgstr "true일 경우 네임스페이스는 삭제 불가능합니다." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "이미지 %(id)s이(가) 사용 중이므로 이를 삭제할 수 없음: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "업로드한 이미지 %(image_id)s을(를) 찾을 수 없음. 이미지는 업로드 중에 삭제되" "었을 수 있음: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "%(image_id)s 이미지는 보호되므로 삭제할 수 없습니다." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "업로드 후에 %s 이미지를 찾을 수 없습니다. 업로드 동안 이미지가 삭제되었을 수 " "있습니다. 업로드된 청크를 정리합니다." #, python-format msgid "Image %s not found." msgstr "%s 이미지를 찾을 수 없음" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "이미지가 스토리지 할당량을 초과함: %s" msgid "Image id is required." msgstr "이미지 ID가 필요합니다." #, python-format msgid "Image is already present at store '%s'" msgstr "이미지가 스토어 '%s'에 이미 있습니다." 
#, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "이미지 %(id)s에 대한 이미지 멤버 한계 초과: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s에서 %(new_status)s(으)로의 이미지 상태 전이가 허용되지 않음" #, python-format msgid "Image storage media is full: %s" msgstr "이미지 스토리지 미디어 공간이 꽉 참: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "이미지 %(id)s에 대한 이미지 태그 한계 초과: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "이미지 업로드 문제: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "지정된 ID %(image_id)s을(를) 가진 이미지를 찾을 수 없음" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "인증 전략이 올바르지 않음. 예상: \"%(expected)s\", 수신: \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "올바르지 않은 요청: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "입력에 '%(key)s' 필드가 포함되어 있지 않음" msgid "Input to api_image_import task is empty." msgstr "api_image_import 작업에 대한 입력이 비어있습니다." #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "이미지 스토리지 미디어 권한 부족 : %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "이 자원에 대해 올바르지 않은 JSON 포인터: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 구성 파일의 구성이 올바르지 않습니다." msgid "Invalid configuration in property protection file." msgstr "특성 보호 파일의 올바르지 않은 구성입니다." #, python-format msgid "Invalid content type %(content_type)s" msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "올바르지 않은 필터 값 %s입니다. 따옴표를 닫지 않았습니다." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "올바르지 않은 필터 값 %s입니다. 닫기 따옴표 전에 쉼표가 없습니다." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "올바르지 않은 필터 값 %s입니다. 열기 따옴표 전에 쉼표가 없습니다." #, python-format msgid "Invalid int value for max_rows: %(max_rows)s" msgstr "max_rows: %(max_rows)s에 대한 int 값이 잘못됐습니다." msgid "Invalid location" msgstr "잘못된 위치" #, python-format msgid "Invalid location: %s" msgstr "올바르지 않은 위치: %s" msgid "Invalid locations" msgstr "잘못된 위치들" #, python-format msgid "Invalid locations: %s" msgstr "올바르지 않은 위치: %s" msgid "Invalid marker format" msgstr "올바르지 않은 마커 형식" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "올바르지 않은 조작: `%(op)s`. 다음 중 하나여야 합니다. %(available)s." msgid "Invalid position for adding a location." msgstr "위치를 추가하기에 올바르지 않은 포지션입니다." msgid "Invalid position for removing a location." msgstr "위치를 제거하기에 올바르지 않은 포지션입니다." msgid "Invalid service catalog json." msgstr "올바르지 않은 서비스 카탈로그 json입니다." #, python-format msgid "Invalid sort direction: %s" msgstr "올바르지 않은 정렬 방향: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "올바르지 않은 정렬 키: %(sort_key)s. 다음 중 하나여야 합니다. %(available)s." #, python-format msgid "Invalid status value: %s" msgstr "올바르지 않은 상태 값: %s" #, python-format msgid "Invalid status: %s" msgstr "올바르지 않은 상태: %s" #, python-format msgid "Invalid type value: %s" msgstr "올바르지 않은 유형 값: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "올바르지 않은 업데이트입니다. 
%s과(와) 동일한 이름의 메타데이터 정의 네임스페" "이스가 중복됩니다." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "올바르지 않은 업데이트입니다. namespace=%(namespace_name)s에서 name=%(name)s" "과(와) 동일한 이름의 메타데이터 정의 오브젝트가 중복됩니다." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "올바르지 않은 업데이트입니다. namespace=%(namespace_name)s에서 name=%(name)s" "과(와) 동일한 이름의 메타데이터 정의 오브젝트가 중복됩니다." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "올바르지 않은 업데이트입니다. 네임스페이스=%(namespace_name)s의 동일한 이름=" "%(name)s(으)로 메타데이터 정의 특성이 중복됩니다." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "매개변수 '%(param)s'의 올바르지 않은 값 '%(value)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "옵션 %(option)s에 올바르지 않은 값: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "올바르지 않은 가시성 값: %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "위치가 표시되지 않는 경우 위치를 추가할 수 없습니다." msgid "It's not allowed to remove locations if locations are invisible." msgstr "위치가 표시되지 않는 경우 위치를 제거할 수 없습니다." msgid "It's not allowed to update locations if locations are invisible." msgstr "위치가 표시되지 않는 경우 위치를 업데이트할 수 없습니다." msgid "List of strings related to the image" msgstr "이미지와 관련된 문자열 목록" msgid "Malformed JSON in request body." msgstr "요청 본문에서 JSON의 형식이 올바르지 않습니다." msgid "Maximal age is count of days since epoch." msgstr "최대 기간은 epoch 이후의 일 수입니다." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "최대 경로 재지정 수(%(redirects)s)를 초과했습니다." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "멤버 %(member_id)s이(가) 이미지 %(image_id)s에 대해 중복됨" msgid "Member can't be empty" msgstr "멤버는 비어 있을 수 없음" msgid "Member to be added not specified" msgstr "추가할 멤버를 지정하지 않음" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "메타데이터 정의 네임스페이스 %(namespace)s이(가) 보호되어 있어 삭제할 수 없습" "니다." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 네임스페이스를 찾을 수 없음" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "메타데이터 정의 오브젝트 %(object_name)s이(가) 보호되어 있어 삭제할 수 없습니" "다." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 오브젝트를 찾을 수 없음" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "메타데이터 정의 특성 %(property_name)s이(가) 보호되어 있어 삭제할 수 없습니다." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 특성을 찾을 수 없음" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "메타데이터 정의 resource-type %(resource_type_name)s은(는) 시드(seed) 시스템 " "유형이므로 삭제할 수 없습니다." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "메타데이터 정의 resource-type-association %(resource_type)s이(가) 보호되어 있어 삭" "제할 수 없습니다."
#, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "메타데이터 정의 태그 %(tag_name)s은(는) 보호되므로 삭제할 수 없습니다." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 태그를 찾을 수 없음" msgid "Missing required 'image_id' field" msgstr "필수인 'image_id' 필드가 없습니다." #, python-format msgid "Missing required credential: %(required)s" msgstr "필수 신임 정보 누락: %(required)s" msgid "Multi backend is not supported at this site." msgstr "이 사이트에서는 다중 백엔드를 지원하지 않습니다." #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "다중 '이미지' 서비스가 %(region)s 리젼에 일치합니다. 이는 일반적으로 리젼이 " "필요하지만 아직 리젼을 제공하지 않은 경우 발생합니다." #, python-format msgid "No image found with ID %s" msgstr "ID가 %s인 이미지를 찾을 수 없음" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "%(img)s 이미지에서 ID가 %(loc)s인 위치를 찾을 수 없음" #, python-format msgid "Not allowed to create members for image %s." msgstr "이미지 %s의 멤버를 작성할 수 없습니다." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "'%s' 상태의 이미지를 비활성화할 수 없음" #, python-format msgid "Not allowed to delete members for image %s." msgstr "이미지 %s의 멤버를 삭제할 수 없습니다." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "이미지 %s의 태그를 삭제할 수 없습니다." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "'%s' 상태의 이미지를 다시 활성화할 수 없음" #, python-format msgid "Not allowed to update members for image %s." msgstr "이미지 %s의 멤버를 업데이트할 수 없습니다." #, python-format msgid "Not allowed to update tags for image %s." msgstr "이미지 %s의 태그를 업데이트할 수 없습니다." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "이미지 %(image_id)s에 대한 이미지 데이터의 업로드가 허용되지 않음: %(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "정렬 디렉토리 수가 정렬 키 수와 일치하지 않음" msgid "OVA extract is limited to admin" msgstr "관리자만 OVA를 추출할 수 있음" msgid "Old and new sorting syntax cannot be combined" msgstr "이전 및 새 저장 구문은 결합할 수 없음" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "\"%s\" 오퍼레이션에는 \"value\"라는 이름의 멤버가 필요합니다." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "조작 오브젝트에는 \"add\", \"remove\", 또는 \"replace\" 멤버 중 하나만 포함되" "어야 합니다." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "조작 오브젝트에는 \"add\", \"remove\",또는 \"replace\" 멤버 중 하나만 포함되" "어야 합니다." msgid "Operations must be JSON objects." msgstr "오퍼레이션은 JSON 오브젝트여야 합니다." #, python-format msgid "Original locations is not empty: %s" msgstr "원본 위치가 비어있지 않음: %s" msgid "Owner can't be updated by non admin." msgstr "비관리자는 소유자를 업데이트할 수 없습니다." msgid "Owner of the image" msgstr "이미지의 소유자" msgid "Owner of the namespace." msgstr "네임스페이스의 소유자입니다." msgid "Param values can't contain 4 byte unicode." msgstr "매개변수 값에 4바이트 유니코드를 포함할 수 없습니다." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "`%s` 포인터에 인식되는 이스케이프 시퀀스가 아닌 \"~\"가 포함되어 있습니다." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "포인터 `%s`에 인접 \"/\"가 포함됩니다." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "포인터 `%s`에 올바른 토큰이 포함되어 있지 않습니다." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "`%s` 포인터가 \"/\"로 시작하지 않습니다." 
#, python-format msgid "Pointer `%s` end with \"/\"." msgstr "포인터 `%s`이(가) \"/\"로 끝납니다." #, python-format msgid "Port \"%s\" is not valid." msgstr "\"%s\" 포트가 올바르지 않습니다." #, python-format msgid "Process %d not running" msgstr "프로세스 %d이(가) 실행 중이지 않음" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "데이터를 저장하기 전에 %s 특성을 설정해야 합니다." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "특성 %(property_name)s이(가) 예상 자원 유형 연관 접두부인 '%(prefix)s'(으)로 " "시작하지 않습니다." #, python-format msgid "Property %s already present." msgstr "%s 특성이 이미 존재합니다." #, python-format msgid "Property %s does not exist." msgstr "%s 특성이 존재하지 않습니다." #, python-format msgid "Property %s may not be removed." msgstr "%s 특성을 제거할 수 없습니다." #, python-format msgid "Property %s must be set prior to saving data." msgstr "데이터를 저장하기 전에 %s 특성을 설정해야 합니다." msgid "Property names can't contain 4 byte unicode." msgstr "특성 이름에 4바이트 유니코드를 포함할 수 없습니다." #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "제공된 오브젝트가 스키마 '%(schema)s'에 일치하지 않음: %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "제공된 태스크의 상태가 지원되지 않음: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "제공된 태스크 유형이 지원되지 않음: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "사용자에게 익숙한 네임스페이스 설명을 제공합니다." msgid "Received invalid HTTP redirect." msgstr "올바르지 않은 HTTP 경로 재지정을 수신했습니다." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "권한 부여를 위해 %(uri)s(으)로 경로 재지정 중입니다." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "API 서버에서 레지스트리가 올바르게 구성되지 않았습니다. 이유: %(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "%(serv)s을(를) 다시 로드할 수 없음" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "신호(%(sig)s)와 함께 %(serv)s(pid %(pid)s) 다시 로드 중" #, python-format msgid "Removing stale pid file %s" msgstr "시간이 경과된 pid 파일 %s을(를) 제거하는 중" msgid "Request body must be a JSON array of operation objects." msgstr "요청 본문은 오퍼레이션 오브젝트의 JSON 배열이어야 합니다." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone의 응답에 Glance 엔드포인트가 들어있지 않습니다." msgid "Scope of image accessibility" msgstr "이미지 접근성의 범위" msgid "Scope of namespace accessibility." msgstr "네임스페이스 접근성의 범위입니다." msgid "Scrubber encountered an error while trying to fetch scrub jobs." msgstr "Scrubber에서 scrub jobs를 가져오다 오류가 발생했습다." #, python-format msgid "Server %(serv)s is stopped" msgstr "서버 %(serv)s이(가) 중지됨" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "서버 작업자 작성에 실패함: %(reason)s." msgid "Signature verification failed" msgstr "서명 검증 실패" msgid "Size of image file in bytes" msgstr "이미지 파일의 크기(바이트)" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "일부 자원 유형은 인스턴스 당 둘 이상의 키 / 값 쌍을 허용합니다.예를 들어, " "Cinder는 볼륨에 사용자 및 이미지 메타데이터를 허용합니다. 이미지 특성 메타데" "이터만 Nova(스케줄링 또는 드라이버)에 의해 평가됩니다. 이 특성은 모호성을 제" "거하기 위해 네임스페이스 대상을 허용합니다." msgid "Sort direction supplied was not valid." msgstr "제공된 정렬 방향이 올바르지 않습니다." msgid "Sort key supplied was not valid." 
msgstr "제공되는 정렬 키가 올바르지 않습니다." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "제공된 자원 유형에 사용할 접두부를 지정합니다. 지정된 자원 유형에 적용되는 경" "우 네임스페이스의 모든 특성은 이 접두부로 시작해야 합니다. 접두부 구분 기호" "(예: 콜론 :)를 포함해야 합니다." msgid "Specifying both 'visibility' and 'is_public' is not permiitted." msgstr "'visibility'와 'is_public'을 모두 지정하는 건 허용되지 않습니다." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "상태는 \"보류 중\", \"수락됨\" 또는 \"거부됨\"이어야 합니다." msgid "Status not specified" msgstr "상태를 지정하지 않음" msgid "Status of the image" msgstr "이미지의 상태" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "%(cur_status)s에서 %(new_status)s(으)로의 상태 전이가 허용되지 않음" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "신호(%(sig)s)와 함께 %(serv)s(pid %(pid)s) 중지 중" msgid "Stores parameter and x-image-meta-store header can't be both specified" msgstr "저장소 매개 변수와 x-image-meta-store 헤더를 둘 다 지정할 수 없습니다." msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' 이미지 속성에 대해 지원되는 값" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' 이미지 속성에 대해 지원되는 값" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "%(serv)s이(가) %(rsn)s이므로 재파생이 억제되었습니다." msgid "System SIGHUP signal received." msgstr "시스템 SIGHUP 신호를 수신했습니다." #, python-format msgid "Task '%s' is required" msgstr "태스크 '%s'이(가) 필요함" msgid "Task does not exist" msgstr "태스크가 존재하지 않음" msgid "Task failed due to Internal Error" msgstr "내부 오류로 인해 태스크 실패" msgid "Task was aborted externally" msgstr "작업이 외부에서 중단됐습니다." msgid "Task was not configured properly" msgstr "태스크가 제대로 구성되지 않음" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "지정된 ID가 %(task_id)s인 태스크를 찾을 수 없음" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "\"changes-since\" 필터는 v2에서 더 이상 사용할 수 없습니다." #, python-format msgid "The CA file you specified %s does not exist" msgstr "사용자가 지정한 CA 파일 %s이(가) 존재하지 않음" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "이 태스크 %(task_id)s에서 작성 중인 이미지 %(image_id)s 오브젝트는 더 이상 향" "후 처리에 사용할 수 있는 올바른 상태가 아닙니다." msgid "The Store URI was malformed." msgstr "저장소 URI의 형식이 올바르지 않습니다." #, python-format msgid "The cert file you specified %s does not exist" msgstr "사용자가 지정한 인증 파일 %s이(가) 존재하지 않음" msgid "The current status of this task" msgstr "이 태스크의 현재 상태" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "디바이스 하우징 이미지 캐시 디렉터리 %(image_cache_dir)s의 Device 는 xattr을 " "지원하지 않습니다. fstab을 수정하거나 user_xattr 옵션을 디바이스 하우징 캐시 " "디렉터리의 적합한 행에 추가하기 바랍니다." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "제공된 uri가 올바르지 않습니다. 다음 지원 uri 목록에서 올바른 uri를 지정하십" "시오. 
%(supported)s" #, python-format msgid "The incoming image is too large: %s" msgstr "수신 이미지가 너무 큼: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "사용자가 지정한 키 파일 %s이(가) 존재하지 않음" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "허용된 이미지 위치 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "이 이미지에 대해 허용된 이미지 멤버 수의 한계가 초과되었습니다. 시도함: " "%(attempted)s, 최대: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "허용된 이미지 특성 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "허용된 이미지 태그 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " "%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "위치 %(location)s이(가) 이미 있음" #, python-format msgid "The location data has an invalid ID: %d" msgstr "위치 데이터의 ID가 올바르지 않음: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "name=%(record_name)s인 메타데이터 정의 %(record_type)s이(가) 삭제되지 않습니" "다. 기타 레코드를 여전히 참조합니다." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "메타데이터 정의 namespace=%(namespace_name)s이(가) 이미 존재합니다." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "name=%(object_name)s인 메타데이터 정의 오브젝트를 namespace=" "%(namespace_name)s에서 찾을 수 없습니다." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s인 메타데이터 정의 특성을 namespace=%(namespace_name)s" "에서 찾을 수 없습니다." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "resource-type=%(resource_type_name)s의 메타데이터 정의 자원 유형 연관이 " "namespace=%(namespace_name)s에 이미 존재합니다." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "resource-type=%(resource_type_name)s의 메타데이터 정의 자원 유형 연관이 " "namespace=%(namespace_name)s에서 찾을 수 없습니다." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "name=%(resource_type_name)s인 메타데이터 정의 자원 유형을 찾을 수 없습니다." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "name=%(name)s인 메타데이터 정의 태그를 namespace=%(namespace_name)s에서 찾을 " "수 없습니다." msgid "The parameters required by task, JSON blob" msgstr "태스크에서 필요로 하는 매개변수, JSON blob" msgid "The provided image is too large." msgstr "제공된 이미지가 너무 큽니다." msgid "The request returned 500 Internal Server Error." msgstr "요청 시 500 내부 서버 오류가 리턴되었습니다." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." 
msgstr "" "요청에서 '503 서비스 사용 불가능'을 리턴했습니다. 이는 일반적으로 서비스 과부" "하나 기타 일시적 정전일 경우 발생합니다." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "요청이 302 다중 선택사항을 리턴했습니다. 이는 일반적으로 요청 URI에 버전 표시" "기를 포함하지 않았음을 의미합니다.\n" "\n" "리턴된 응답의 본문:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "요청에서 '413 요청 엔티티가 너무 큼'을 리턴했습니다. 이는 일반적으로 등급 한" "도나 할당량 임계값을 위반했음을 의미합니다.\n" "\n" "응답 본문:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "요청이 예상치 않은 상태를 리턴함: %(status)s.\n" "\n" "응답 본문:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "요청된 이미지가 비활성화되었습니다. 이미지 데이터 다운로드가 금지됩니다." msgid "The result of current task, JSON blob" msgstr "현재 태스크의 결과, JSON blob" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "데이터 크기 %(image_size)s이(가) 남은 한도 바이트 %(remaining)s을(를) 초과합" "니다." #, python-format msgid "The specified member %s could not be found" msgstr "지정된 멤버 %s을(를) 찾을 수 없음" #, python-format msgid "The specified metadata object %s could not be found" msgstr "지정된 메타데이터 오브젝트 %s을(를) 찾을 수 없음" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "지정된 메타데이터 태그 %s을(를) 찾을 수 없음" #, python-format msgid "The specified namespace %s could not be found" msgstr "지정된 네임스페이스 %s을(를) 찾을 수 없음" #, python-format msgid "The specified property %s could not be found" msgstr "지정된 특성 %s을(를) 찾을 수 없음" #, python-format msgid "The specified resource type %s could not be found " msgstr "지정된 자원 유형 %s을(를) 찾을 수 없음" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "삭제된 이미지 위치의 상태는 'pending_delete' 또는 'deleted'로만 설정할 수 있" "음" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "삭제된 이미지 위치의 상태는 'pending_delete' 또는 'deleted'로만 설정할 수 있" "습니다." msgid "The status of this image member" msgstr "이 이미지 멤버의 상태" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "대상 멤버 %(member_id)s이(가) 이미 이미지 %(image_id)s." msgid "The type of task represented by this content" msgstr "이 컨텐츠에서 나타내는 태스크의 유형" msgid "The unique namespace text." msgstr "고유 네임스페이스 텍스트입니다." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "사용자에게 익숙한 네임스페이스의 이름입니다. 가능한 경우 UI에서 사용됩니다." msgid "There was an error configuring the client." msgstr "클라이언트 구성 오류가 있었습니다." msgid "There was an error connecting to a server" msgstr "서버 연결 오류가 있었습니다." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "해당 동작은 현재 Glance 작업에 대해서는 허용되지 않습니다. 이들은 expires_at " "특성에 기반한 시간에 도달하면 자동으로 삭제됩니다." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "이후에 태스크가 활성이 되는 시간(시), 성공 또는 실패" msgid "Too few arguments." msgstr "인수가 너무 적습니다." 
msgid "URL to access the image file kept in external store" msgstr "외부 저장소에 보관된 이미지 파일에 액세스하기 위한 URL" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "pid 파일 %(pid)s을(를) 작성할 수 없습니다. 비루트로 실행 중인지 확인하십시" "오.\n" "임시 파일로 돌아가 다음을 사용하여 %(service)s 서비스를 중지할 수 있습니다.\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "알 수 없는 연산자 '%s'(으)로 필터링할 수 없습니다." msgid "Unable to filter on a range with a non-numeric value." msgstr "숫자가 아닌 값을 사용하여 범위에서 필터링할 수 없습니다." msgid "Unable to filter on a unknown operator." msgstr "알 수 없는 연산자를 필터링할 수 없습니다." msgid "Unable to filter using the specified operator." msgstr "지정된 연산자를 사용하여 필터링할 수 없습니다." msgid "Unable to filter using the specified range." msgstr "지정된 범위를 사용하여 필터링할 수 없습니다." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "JSON 스키마 변경에서 '%s'을(를) 찾을 수 없음" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "JSON 스키마 변경에서 `op`를 찾을 수 없습니다. 다음 중 하나여야 합니다. " "%(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "파일 디스크립터 한계를 늘릴 수 없습니다. 비루트로 실행 중인지 확인하십시오." #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "구성 파일 %(conf_file)s에서 %(app_name)s을(를) 로드할 수 없습니다.\n" "오류 발생: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "스키마를 로드할 수 없음: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "%s에 대한 붙여넣기 구성 파일을 찾을 수 없습니다." #, python-format msgid "Unable to upload duplicate image data for image %(image_id)s: %(error)s" msgstr "" "이미지 %(image_id)s에 대한 중복된 이미지 데이터를 업로드할 수 없음: %(error)s" msgid "Unexpected body type. Expected list/dict." msgstr "예기치않은 본문 타입. list/dict를 예상합니다." #, python-format msgid "Unexpected response: %s" msgstr "예상치 않은 응답: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "알 수 없는 auth 전략 '%s'" #, python-format msgid "Unknown command: %s" msgstr "알 수 없는 명령: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" msgid "Unrecognized JSON Schema draft version" msgstr "인식되지 않는 JSON 스키마 드래프트 버전" msgid "Virtual size of image in bytes" msgstr "이미지의 가상 크기(바이트)" msgid "" "Visibility must be one of \"community\", \"public\", \"private\", or \"shared" "\"" msgstr "" "가시성은 \"community\", \"public\", \"private\" 또는 \"shared\" 중 하나여야 " "합니다." #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "pid %(pid)s(%(file)s)이 종료될 때까지 15초 대기함, 포기하는 중" msgid "You are not authenticated." msgstr "인증되지 않은 사용자입니다." #, python-format msgid "You are not authorized to complete %(action)s action." msgstr "%(action)s 작업을 완료할 수 있는 권한이 없습니다." msgid "You are not authorized to complete this action." msgstr "이 조치를 완료할 권한이 없습니다." #, python-format msgid "You are not authorized to lookup image %s." msgstr "이미지 %s을(를) 검색할 권한이 없습니다." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "이미지 %s의 멤버를 검색할 권한이 없습니다." msgid "You are not permitted to create image members for the image." msgstr "이미지에 대한 이미지 멤버를 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "'%s' 소유의 이미지를 작성할 권한이 없습니다." 
msgid "You cannot delete image member." msgstr "이미지 멤버를 삭제할 수 없습니다." msgid "You do not own this image" msgstr "이 이미지를 소유하지 않음" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "연결에 SSL을 사용하도록 선택하고 인증을 제공했지만 key_file 매개변수를 제공하" "지 못했거나 GLANCE_CLIENT_KEY_FILE 환경 변수를 설정하지 못했습니다." msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "연결에 SSL을 사용하도록 선택하고 키를 제공했지만 cert_file 매개변수를 제공하" "지 못했거나 GLANCE_CLIENT_CERT_FILE 환경 변수를 설정하지 못했습니다." msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__()가 예상치 못한 키워드 인수 '%s'을(를) 가져옴" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "업데이트에서 %(current)s에서 %(next)s(으)로 상태 전이할 수 (from_state=" "%(from)s을(를) 원함)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "사용자 정의 특성 (%(props)s)이(가) 기본 특성과 충돌함" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "이 플랫폼에서 eventlet 'poll'이나 'selects' 허브를 모두 사용할 수 없음" msgid "limit param must be an integer" msgstr "limit 매개변수는 정수여야 함" msgid "limit param must be positive" msgstr "limit 매개변수가 양수여야 함" msgid "md5 hash of image contents." msgstr "이미지 컨텐츠의 md5 해시입니다." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image()가 예상치 못한 키워드 %s을(를) 가져옴" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s을(를) 실행할 수 없음. 오류 발생: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id가 너무 김, 최대 크기 %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/pt_BR/0000775000175000017500000000000000000000000016554 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/pt_BR/LC_MESSAGES/0000775000175000017500000000000000000000000020341 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/pt_BR/LC_MESSAGES/glance.po0000664000175000017500000014664400000000000022151 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Rodrigo Felix de Almeida , 2014 # Volmar Oliveira Junior , 2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:22+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "exceção %(cls)s foi disparada na última chamada RPC: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s não localizado na lista de membros da imagem %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) está em execução..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s parece já estar em execução: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s de %(task_type)s não foi configurado adequadamente. Não foi " "possível carregar o armazenamento de sistema de arquivos" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s de %(task_type)s não foi configurado adequadamente. Faltando o " "diretório de trabalho: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)sing %(serv)s com %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Especifique um par host:porta, em que o host é um endereço IPv4, IPv6, " "nome do host ou FQDN. Se você estiver usando um endereço IPv6, coloque-o nos " "suportes separadamente da porta (ou seja, \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s não pode conter caracteres de unicode de 4 bytes." #, python-format msgid "%s is already stopped" msgstr "%s já está parado" #, python-format msgid "%s is stopped" msgstr "%s está parado" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "opção --os_auth_url ou variável de ambiente OS_AUTH_URL requerida quando " "estratégia de autenticação keystone está ativada\n" msgid "A body is not expected with this request." msgstr "Um corpo não é esperado com essa solicitação." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Um objeto de definição de metadados com o nome=%(object_name)s já existe no " "namespace=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Uma propriedade de definição de metadados com o nome=%(property_name)s já " "existe no namespace=%(namespace_name)s." 
#, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Um tipo de recurso de definição de metadados com o nome=" "%(resource_type_name)s já existe." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Um conjunto de URLs para acessar o arquivo de imagem mantido em " "armazenamento externo" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Quantidade de espaço em disco (em GB) necessária para a imagem de " "inicialização." msgid "Amount of ram (in MB) required to boot image." msgstr "Quantidade de ram (em MB) necessária para a imagem de inicialização." msgid "An identifier for the image" msgstr "Um identificador para a imagem" msgid "An identifier for the image member (tenantId)" msgstr "Um identificador para o membro de imagem (tenantId)" msgid "An identifier for the owner of this task" msgstr "Um identificador para o proprietário desta tarefa" msgid "An identifier for the task" msgstr "Um identificador para a tarefa" msgid "An image file url" msgstr "Uma URL de arquivo de imagem" msgid "An image schema url" msgstr "Uma URL de esquema de imagem" msgid "An image self url" msgstr "Uma URL automática de imagem" msgid "An import task exception occurred" msgstr "Ocorreu uma exceção em uma tarefa importante" msgid "An object with the same identifier already exists." msgstr "Um objeto com o mesmo identificador já existe." msgid "An object with the same identifier is currently being operated on." msgstr "Um objeto com o mesmo identificador está atualmente sendo operado." msgid "An object with the specified identifier was not found." msgstr "Um objeto com o identificador especificado não foi localizado." msgid "An unknown exception occurred" msgstr "Ocorreu uma exceção desconhecida" msgid "An unknown task exception occurred" msgstr "Ocorreu uma exceção de tarefa desconhecida" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "O atributo '%(property)s' é somente leitura." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "O atributo '%(property)s' é reservado." #, python-format msgid "Attribute '%s' is read-only." msgstr "Atributo '%s' é apenas leitura." #, python-format msgid "Attribute '%s' is reserved." msgstr "Atributo '%s' é reservado." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "Atributo container_format pode ser apenas substituído por uma imagem na fila." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "Atributo disk_format pode ser apenas substituído por uma imagem na fila." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Serviço de autenticação na URL %(url)s não localizado." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Erro de autenticação - o token pode ter expirado durante o envio do arquivo. " "Removendo dados da imagem %s." msgid "Authorization failed." msgstr "Falha de autorização." msgid "Available categories:" msgstr "Categorias disponíveis:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Formato de filtro de consulta \"%s\" inválido. Use a notação ISO 8601 " "DateTime." #, python-format msgid "Bad header: %(header_name)s" msgstr "Cabeçalho inválido: %(header_name)s" msgid "Body expected in request." msgstr "Corpo esperado na solicitação." 
msgid "Cannot be a negative value" msgstr "Não pode ser um valor negativo" msgid "Cannot be a negative value." msgstr "Não pode ser um valor negativo." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "" "Não é possível converter a imagem %(key)s '%(value)s' para um número inteiro." msgid "Cannot remove last location in the image." msgstr "Não é possível remover o último local na imagem." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Não é possível salvar os dados da imagem %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Não é possível configurar locais para esvaziar a lista." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "A soma de verificação falhou. Interrompido o armazenamento em cache da " "imagem '%s'." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Erro de conexão/solicitação inválida para serviço de autenticação na URL " "%(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL construída: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Download de imagem corrompido para a imagem %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Não foi possível ligar a %(host)s:%(port)s depois de tentar por 30 segundos" msgid "Could not find OVF file in OVA archive file." msgstr "Não foi possível localizar o arquivo OVF no archive OVA." #, python-format msgid "Could not find metadata object %s" msgstr "Não foi possível localizar o objeto de metadados %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Não foi possível localizar a identificação de metadados %s" #, python-format msgid "Could not find property %s" msgstr "Não é possível localizar a propriedade %s" #, python-format msgid "Could not find task %s" msgstr "Não foi possível localizar tarefa %s" #, python-format msgid "Could not update image: %s" msgstr "Não foi possível atualizar a imagem: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Atualmente, os pacotes OVA que contêm diversos discos não são suportados. " msgid "Data supplied was not valid." msgstr "Os dados fornecidos não eram válidos." 
msgid "Date and time of image member creation" msgstr "Data e hora da criação de membro da imagem" msgid "Date and time of image registration" msgstr "Data e hora do registro da imagem " msgid "Date and time of last modification of image member" msgstr "Data e hora da última modificação de membro da imagem" msgid "Date and time of namespace creation" msgstr "Data e hora da criação do namespace" msgid "Date and time of object creation" msgstr "Data e hora da criação do objeto" msgid "Date and time of resource type association" msgstr "Data e hora da associação do tipo de recurso " msgid "Date and time of tag creation" msgstr "Data e hora da criação da identificação " msgid "Date and time of the last image modification" msgstr "Data e hora da última modificação da imagem " msgid "Date and time of the last namespace modification" msgstr "Data e hora da última modificação do namespace " msgid "Date and time of the last object modification" msgstr "Data e hora da última modificação do objeto" msgid "Date and time of the last resource type association modification" msgstr "Data e hora da última modificação de associação de tipo de recurso " msgid "Date and time of the last tag modification" msgstr "Data e hora da última modificação da identificação " msgid "Datetime when this resource was created" msgstr "Data/hora quando este recurso foi criado" msgid "Datetime when this resource was updated" msgstr "Data/Hora quando este recurso foi atualizado" msgid "Datetime when this resource would be subject to removal" msgstr "Data/Hora quando este recurso deve ser objeto de remoção" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "Negando a tentativa de upload da imagem porque ela excede a cota: %s" msgid "Descriptive name for the image" msgstr "Nome descritivo para a imagem" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "O driver %(driver_name)s não pôde ser configurado corretamente. Motivo: " "%(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Erro ao decodificar sua solicitação. A URL ou o corpo da solicitação " "continha caracteres que não puderam ser decodificados pelo Glance" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Erro ao buscar membros da imagem %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Erro na configuração do armazenamento. A inclusão de imagens para " "armazenamento está desativada." msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "O membro era esperado no formato: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "O estado era esperado no formato: {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Falhar ao localizar a imagem %(image_id)s para excluir" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Falha ao localizar o tipo de recurso %(resourcetype)s para excluir" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Falha ao inicializar o banco de dados de cache da imagem. 
Erro obtido: %s" #, python-format msgid "Failed to read %s from config" msgstr "Falha ao ler %s da configuração" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Falha ao fazer upload dos dados de imagem para a imagem %(image_id)s devido " "a erro de HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Falha ao fazer upload dos dados de imagem para a imagem %(image_id)s devido " "a erro interno: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "O arquivo %(path)s tem arquivo de backup inválido %(bfile)s, interrompendo." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Importações baseadas em arquivo não são permitidas. Use uma fonte não local " "de dados de imagem." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Proibindo solicitação, o namespace de definição de metadados=%s não é " "visível." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Proibindo solicitação, a tarefa %s não está visível" msgid "Format of the container" msgstr "Formato do contêiner" msgid "Format of the disk" msgstr "Formato do disco" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" não é válido." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host e porta \"%s\" não são válidos." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Mensagem informativa legível apenas incluída quando apropriado (geralmente " "em falha)" msgid "If true, image will not be deletable." msgstr "Se true, a imagem não será excluível." msgid "If true, namespace will not be deletable." msgstr "Se verdadeiro, o namespace não poderá ser excluído." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" "A imagem %(id)s não pôde ser excluída, pois ela está sendo usada: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Imagem %(image_id)s não pôde ser localizada após o upload. A imagem pode ter " "sido excluída durante o upload: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "A imagem %(image_id)s está protegida e não pode ser excluída." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "A imagem %s não pôde ser localizada após o upload. A imagem pode ter sido " "excluída durante o upload, limpando os chunks transferidos por upload." #, python-format msgid "Image %s not found." msgstr "Imagem %s não localizada." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "Imagem excede a cota de armazenamento: %s" msgid "Image id is required." msgstr "ID da imagem é obrigatório." 
#, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "O limite do membro da imagem excedido para imagem %(id)s: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Transição de status de imagem de %(cur_status)s para %(new_status)s não é " "permitido" #, python-format msgid "Image storage media is full: %s" msgstr "A mídia de armazenamento da imagem está cheia: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "" "O limite de identificação da imagem excedeu para a imagem %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problema ao fazer upload de imagem: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Imagem com o ID fornecido %(image_id)s não foi localizada" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Estratégia de autorização incorreta; esperava-se \"%(expected)s\", mas foi " "recebido \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Requisição incorreta: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "A entrada não contém o campo '%(key)s'" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Permissões insuficientes na mídia de armazenamento da imagem: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Ponteiro de JSON inválido para este recurso: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "Configuração inválida no arquivo de configuração glance-swift." msgid "Invalid configuration in property protection file." msgstr "Configuração inválida no arquivo de proteção de propriedade." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Tipo de conteúdo inválido %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valor de filtro inválido %s. A aspa não está fechada." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valor de filtro inválido %s.Não há nenhuma vírgula antes da aspa de " "fechamento." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Valor de filtro inválido %s.Não há nenhuma vírgula antes da aspa de abertura." msgid "Invalid location" msgstr "Local inválido" #, python-format msgid "Invalid location: %s" msgstr "Localidade inválida: %s" msgid "Invalid locations" msgstr "Locais inválidos" #, python-format msgid "Invalid locations: %s" msgstr "Localidades inválidas: %s" msgid "Invalid marker format" msgstr "Formato de marcador inválido" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operação inválida: `%(op)s`. Ela deve ser um das seguintes: %(available)s." msgid "Invalid position for adding a location." msgstr "Posição inválida para adicionar uma localidade." msgid "Invalid position for removing a location." msgstr "Posição inválida para remover uma localidade." msgid "Invalid service catalog json." msgstr "Catálogo de serviço json inválido." #, python-format msgid "Invalid sort direction: %s" msgstr "Direção de classificação inválida: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." 
msgstr "" "Chave de classificação inválida: %(sort_key)s. Deve ser um dos seguintes: " "%(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Valro de status inválido: %s" #, python-format msgid "Invalid status: %s" msgstr "Status inválido: %s" #, python-format msgid "Invalid type value: %s" msgstr "Valor de tipo inválido: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Atualização inválida. Ela resultaria em uma propriedade de definição de " "metadados duplicada com o mesmo nome de %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Atualização inválida. Ela resultaria em um objeto de definição de metadados " "duplicado com o mesmo nome=%(name)s no namespace=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Atualização inválida. Ela resultaria em um objeto de definição de metadados " "duplicado com o mesmo nome=%(name)s no namespace=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Atualização inválida. Ela resultaria em uma propriedade de definição de " "metadados duplicada com o mesmo nome=%(name)s no namespace=" "%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Valor inválido '%(value)s' para o parâmetro '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valor inválido para a opção %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valor de visibilidade inválido: %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "Não é permitido adicionar locais se os locais forem invisíveis." msgid "It's not allowed to remove locations if locations are invisible." msgstr "Não é permitido remover locais se os locais forem invisíveis." msgid "It's not allowed to update locations if locations are invisible." msgstr "Não é permitido atualizar locais se os locais forem invisíveis." msgid "List of strings related to the image" msgstr "Lista de sequências relacionadas à imagem" msgid "Malformed JSON in request body." msgstr "JSON malformado no corpo da solicitação." msgid "Maximal age is count of days since epoch." msgstr "A idade máxima é a contagem de dias desde a época." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "O máximo de redirecionamentos (%(redirects)s) foi excedido." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "O membro %(member_id)s é duplicado para a imagem %(image_id)s" msgid "Member can't be empty" msgstr "Membro não pode ser vazio" msgid "Member to be added not specified" msgstr "Membro a ser incluído não especificado" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "O namespace de definição de metadados %(namespace)s é protegido e não pode " "ser excluída." 
#, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Namespace de definição de metadados não localizado para o id=%s" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "O objeto de definição de metadados %(object_name)s é protegido e não pode " "ser excluída." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "Objeto de definição de metadados não localizado para o id=%s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "A propriedade de definição de metadados %(property_name)s é protegida e não " "pode ser excluída." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "Propriedade de definição de metadados não localizada para id=%s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "A definição de metadados resource-type %(resource_type_name)s é um tipo de " "sistema com valor sementee não pode ser excluída." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "A definição de metadados resource-type-association %(resource_type)s é " "protegida e não poderá ser excluída." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "A identificação da definição de metadados %(tag_name)s é protegida e não " "pode ser excluída." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Identificação de definição de metadados não localizada para o id=%s" #, python-format msgid "Missing required credential: %(required)s" msgstr "Credencial necessária ausente: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Diversas correspondências do serviço de 'imagem' para a região %(region)s. " "Isso geralmente significa que uma região é necessária e você não a forneceu." #, python-format msgid "No image found with ID %s" msgstr "Nenhuma imagem encontrada com o ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Nenhum local localizado com o ID %(loc)s da imagem %(img)s" #, python-format msgid "Not allowed to create members for image %s." msgstr "Não é permitido criar membros para a imagem %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Não é permitido desativar a imagem no status '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Não é permitido excluir membros para a imagem %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Não é permitido excluir identificações para a imagem %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Não é permitido reativar a imagem no status '%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "Não é permitido atualizar os membros para a imagem %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Não é permitido atualizar as identificações para a imagem %s." 
#, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Não é permitido fazer upload de dados de imagem para a imagem %(image_id)s: " "%(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "O número de diretórios de classificação não corresponde ao número de chaves " "de classificação" msgid "OVA extract is limited to admin" msgstr "O extrato de OVA é limitado para administrador" msgid "Old and new sorting syntax cannot be combined" msgstr "A sintaxe de classificação nova e antiga não podem ser combinadas" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "A operação \"%s\" requer um membro denominado \"valor\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Objetos de operação devem conter exatamente um membro denominado \"incluir" "\", \"remover\" ou \"substituir\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Objetos de operação devem conter apenas um membro denominado \"incluir\", " "\"remover\" ou \"substituir\"." msgid "Operations must be JSON objects." msgstr "As operações devem ser objetos JSON." #, python-format msgid "Original locations is not empty: %s" msgstr "Localidade original não está vazia: %s" msgid "Owner can't be updated by non admin." msgstr "O proprietário não pode ser atualizado por um não administrador." msgid "Owner of the image" msgstr "Proprietário da imagem" msgid "Owner of the namespace." msgstr "Proprietário do namespace." msgid "Param values can't contain 4 byte unicode." msgstr "Valores de parâmetro não podem conter unicode de 4 bytes." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "O ponteiro `%s` contém \"~\" não parte de uma sequência de escape " "reconhecida." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "O ponteiro `%s` contém uma \"/\" adjacente." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "O ponteiro `%s` não contém um token válido." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "O ponteiro `%s` não começa com \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "O ponteiro `%s` termina com \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "Porta \"%s\" não é válida." #, python-format msgid "Process %d not running" msgstr "O processo %d não está em execução" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "As propriedades %s devem ser configuradas antes de salvar os dados." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "A propriedade %(property_name)s não começa com o prefixo de associação do " "tipo de recurso esperado de ‘%(prefix)s‘." #, python-format msgid "Property %s already present." msgstr "Propriedade %s já presente." #, python-format msgid "Property %s does not exist." msgstr "A propriedade %s não existe." #, python-format msgid "Property %s may not be removed." msgstr "A propriedade %s pode não ser removida." #, python-format msgid "Property %s must be set prior to saving data." msgstr "A propriedade %s deve ser configurada antes de salvar os dados." msgid "Property names can't contain 4 byte unicode." msgstr "Os nomes de propriedade não podem conter unicode de 4 bytes." 
#, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "O objeto fornecido não corresponde ao esquema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Status de tarefa fornecido não é suportado: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Tipo de tarefa fornecido não é suportado: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Fornece uma descrição fácil do namespace." msgid "Received invalid HTTP redirect." msgstr "Redirecionamento de HTTP inválido recebido." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirecionando para %(uri)s para obter autorização." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "O registro não foi configurado corretamente no servidor de API. Motivo: " "%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Recarregamento de %(serv)s não suportado" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Recarregando %(serv)s (pid %(pid)s) com sinal (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Removendo o arquivo pid %s antigo" msgid "Request body must be a JSON array of operation objects." msgstr "" "O corpo da solicitação deve ser uma matriz JSON de objetos de operação." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "A resposta de Keystone não contém um terminal de Visão Rápida." msgid "Scope of image accessibility" msgstr "Escopo de acessibilidade de imagem" msgid "Scope of namespace accessibility." msgstr "Escopo da acessibilidade do namespace." #, python-format msgid "Server %(serv)s is stopped" msgstr "O servidor %(serv)s foi interrompido" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Falha na criação do trabalhador do servidor: %(reason)s." msgid "Signature verification failed" msgstr "A verificação de assinatura falhou" msgid "Size of image file in bytes" msgstr "Tamanho do arquivo da imagem em bytes " msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Alguns tipos de recurso permitem mais de um par de chave/valor por " "instância. Por exemplo, o Cinder permite metadados do usuário e da imagem " "em volumes. Somente os metadados de propriedades da imagem são avaliados " "pelo Nova (planejamento ou drivers). Essa propriedade permite que um destino " "de namespace remova a ambiguidade." msgid "Sort direction supplied was not valid." msgstr "A direção de classificação fornecida não era válida." msgid "Sort key supplied was not valid." msgstr "A chave de classificação fornecida não era válida." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Especifica o prefixo a ser usado para o tipo de recurso determinado. " "Qualquer propriedade no namespace deve ter esse prefixo ao ser aplicada ao " "tipo de recurso especificado. O separador de prefixo deve ser incluído (p. " "ex., dois pontos :)." 
msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "O status deve ser \"pendente\", \"aceito\" ou \"rejeitado\"." msgid "Status not specified" msgstr "Status não especificado" msgid "Status of the image" msgstr "Status da imagem" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Status de transição de %(cur_status)s para %(new_status)s não é permitido" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Parando %(serv)s (pid %(pid)s) com sinal (%(sig)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "Valores suportados para o atributo de imagem 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valores suportados para o atributo de imagem 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Novo spawn suprimido já que %(serv)s era %(rsn)s." msgid "System SIGHUP signal received." msgstr "Sinal SIGHUP do sistema recebido." #, python-format msgid "Task '%s' is required" msgstr "Tarefa '%s é obrigatória" msgid "Task does not exist" msgstr "A tarefa não existe" msgid "Task failed due to Internal Error" msgstr "A tarefa falhou devido a Erro interno" msgid "Task was not configured properly" msgstr "A tarefa não foi configurada adequadamente" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Tarefa com o ID fornecido %(task_id)s não foi localizada" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "O filtro \" changes-since \" não está mais disponível na v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "O arquivo CA especificado %s não existe" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "O objeto da Imagem %(image_id)s que está sendo criado por esta tarefa " "%(task_id)s não está mais no status válido para processamento adicional." msgid "The Store URI was malformed." msgstr "O URI de Armazenamento foi malformado." #, python-format msgid "The cert file you specified %s does not exist" msgstr "O arquivo de certificado especificado %s não existe" msgid "The current status of this task" msgstr "O status atual desta tarefa" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "O dispositivo no qual reside o diretório de cache de imagem " "%(image_cache_dir)s não suporta xattr. É provável que você precise editar " "fstab e incluir a opção user_xattr na linha apropriada do dispositivo que " "contém o diretório de cache." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "O URI fornecido não é válido. Especifique um uri válido a partir da seguinte " "lista de URI suportados %(supported)s" #, python-format msgid "The incoming image is too large: %s" msgstr "A imagem recebida é muito grande: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "O arquivo-chave especificado %s não existe" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. 
" "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de localizações de imagens permitidas. " "Tentativa: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de membros de imagem permitidos para esta " "imagem. Tentativa: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de propriedades de imagem permitidas. " "Tentativa: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de tags de imagem permitidas. Tentativa: " "%(attempted)s, Máximo: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "O local %(location)s já existe" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Os dados da localização têm um ID inválido: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Definição de metadados %(record_type)s com o nome=%(record_name)s não " "excluída. Outros registros ainda se referem a ela." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "O namespace de definição de metadados=%(namespace_name)s já existe." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "O objeto de definição de metadados com o nome=%(object_name)s não foi " "localizado no namespace=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "A propriedade de definição de metadados com o nome=%(property_name)s não foi " "localizada no namespace=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "A associação do tipo de recurso de definição de metadados do tipo derecurso=" "%(resource_type_name)s ao namespace=%(namespace_name)s já existe." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "A associação do tipo de recurso de definição de metadados do tipo derecurso=" "%(resource_type_name)s ao namespace=%(namespace_name)s, não foi localizada." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "O tipo de recurso de definição de metadados com o nome=" "%(resource_type_name)s, não foi localizado." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "A identificação da definição de metadados com o nome=%(name)s não foi " "localizada no namespace=%(namespace_name)s." msgid "The parameters required by task, JSON blob" msgstr "Os parâmetros requeridos pela tarefa, blob JSON" msgid "The provided image is too large." 
msgstr "A imagem fornecida é muito grande." msgid "The request returned 500 Internal Server Error." msgstr "A solicitação retornou 500 Erro Interno do Servidor." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "A solicitação retornou 503 Serviço Indisponível. Isso geralmente ocorre em " "sobrecarga de serviço ou outra interrupção temporária." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "A solicitação retornou 302 Várias Opções. Isso geralmente significa que você " "não incluiu um indicador de versão em um URI de solicitação.\n" "\n" "O corpo da resposta retornou:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "A solicitação retornou 413 Entidade de Solicitação Muito Grande. Isso " "geralmente significa que a taxa de limitação ou um limite de cota foi " "violado.\n" "\n" "O corpo de resposta:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "A solicitação retornou um status inesperado: %(status)s.\n" "\n" "O corpo de resposta:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "A imagem solicitada foi desativada. O download de dados da imagem é proibido." msgid "The result of current task, JSON blob" msgstr "O resultado da tarefa atual, blob JSON" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "O tamanho dos dados que %(image_size)s irá exceder do limite. %(remaining)s " "bytes restantes." #, python-format msgid "The specified member %s could not be found" msgstr "O membro especificado %s não pôde ser localizado" #, python-format msgid "The specified metadata object %s could not be found" msgstr "O objeto de metadados especificado %s não pôde ser localizado" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "A identificação de metadados especificada %s não pôde ser localizada" #, python-format msgid "The specified namespace %s could not be found" msgstr "O namespace especificado %s não pôde ser localizado" #, python-format msgid "The specified property %s could not be found" msgstr "A propriedade especificada %s não pôde ser localizada" #, python-format msgid "The specified resource type %s could not be found " msgstr "O tipo de recurso especificado %s não pôde ser localizado " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "O status de local da imagem excluída só pode ser definido como " "'pending_delete' ou 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "O status de local da imagem excluída só pode ser definido como " "'pending_delete' ou 'deleted'." msgid "The status of this image member" msgstr "O status desse membro da imagem" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "O membro de destino %(member_id)s já está associado à imagem %(image_id)s." 
msgid "The type of task represented by this content" msgstr "O tipo de tarefa representada por este conteúdo" msgid "The unique namespace text." msgstr "O texto do namespace exclusivo." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "O nome fácil do namespace. Usando pela interface com o usuário, se " "disponível." msgid "There was an error configuring the client." msgstr "Houve um erro ao configurar o cliente." msgid "There was an error connecting to a server" msgstr "Houve um erro ao conectar a um servidor" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Esta operação não é atualmente permitida em Tarefas do Glance. Elas são " "automaticamente excluídas após atingir o tempo com base em sua propriedade " "expires_at." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "Tempo em horas durante o qual uma tarefa é mantida, com êxito ou falha" msgid "Too few arguments." msgstr "Muito poucos argumentos." msgid "URL to access the image file kept in external store" msgstr "URL para acessar o arquivo de imagem mantido no armazenamento externo " #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Impossível criar arquivo pid %(pid)s. Executando como não raiz?\n" "Voltando para um arquivo temporário, é possível parar o serviço %(service)s " "usando:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Não é possível filtrar por operador desconhecido '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "Não é possível filtrar um intervalo com um valor não numérico." msgid "Unable to filter on a unknown operator." msgstr "Não é possível filtrar em um operador desconhecido." msgid "Unable to filter using the specified operator." msgstr "Não é possível filtrar usando o operador especificado." msgid "Unable to filter using the specified range." msgstr "Não é possível filtrar usando o intervalo especificado." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Não é possível localizar '%s' na mudança de Esquema JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Não é possível localizar `op` na mudança de Esquema JSON. Deve ser um dos " "seguintes: %(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Não é possível aumentar o limite do descritor de arquivo. Executando como " "não-raiz?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Não é possível carregar %(app_name)s do arquivo de configuração " "%(conf_file)s.\n" "Obtido: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Não é possível carregar o esquema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Impossível localizar o arquivo de configuração de colagem para %s." msgid "Unexpected body type. Expected list/dict." msgstr "Tipo de corpo inesperado. Lista/dicionário esperados." 
#, python-format msgid "Unexpected response: %s" msgstr "Resposta inesperada: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Estratégia de autenticação desconhecida %s'" #, python-format msgid "Unknown command: %s" msgstr "Comando desconhecido: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Versão rascunho do Esquema JSON não reconhecida" msgid "Virtual size of image in bytes" msgstr "Tamanho virtual de imagem em bytes " #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Esperou 15 segundos para pid %(pid)s (%(file)s) ser eliminado; desistindo" msgid "You are not authenticated." msgstr "Você não está autenticado." msgid "You are not authorized to complete this action." msgstr "Você não está autorizado a concluir esta ação." #, python-format msgid "You are not authorized to lookup image %s." msgstr "Você não está autorizado a consultar a imagem %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Você não está autorizado a consultar os membros da imagem %s." msgid "You are not permitted to create image members for the image." msgstr "Você não tem permissão para criar membros da imagem." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Você não tem permissão para criar imagens de propriedade de '%s'." msgid "You do not own this image" msgstr "Você não possui essa imagem" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Você optou por usar SSL na conexão e forneceu um certificado, mas falhou em " "fornecer um parâmetro key_file ou configurar a variável de ambiente " "GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Você optou por usar SSL na conexão e forneceu uma chave, mas falhou em " "fornecer um parâmetro cert_file ou configurar a variável de ambiente " "GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() obteve argumento de palavra-chave inesperado '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "Não é possível a transição de %(current)s para %(next)s na atualização " "(desejado from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "conflito de propriedades customizadas (%(props)s) com propriedades de base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "nem o hub 'poll' nem o 'selects' do eventlet estão disponíveis nesta " "plataforma" msgid "limit param must be an integer" msgstr "o parâmetro limit deve ser um número inteiro" msgid "limit param must be positive" msgstr "o parâmetro limit deve ser positivo" msgid "md5 hash of image contents." msgstr "Hash md5 do conteúdo da imagem." 
#, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() obteve palavras-chave inesperadas %s" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "Não é possível ativar %(serv)s. Obteve erro: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id é muito longo; tamanho máximo %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/ru/0000775000175000017500000000000000000000000016174 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/ru/LC_MESSAGES/0000775000175000017500000000000000000000000017761 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/ru/LC_MESSAGES/glance.po0000664000175000017500000017575600000000000021577 0ustar00zuulzuul00000000000000# Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:21+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ru\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "" "В последнем вызове rpc возникла исключительная ситуация %(cls)s: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s не найден в списке элементов образа %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) работает..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s уже запущен: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "Служба %(task_id)s типа %(task_type)s настроена неправильно. Не удалось " "загрузить хранилище в файловой системе" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "Служба %(task_id)s типа %(task_type)s настроена неправильно. Отсутствует " "рабочий каталог: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)s на %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)s %(serv)s с %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Укажите пару host:port, где host - это адрес IPv4, адрес IPv6, имя хоста " "или FQDN. При указании адреса IPv6 заключите его в квадратные скобки " "отдельно от порта (например, \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s не может содержать символы в кодировке 4-байтового unicode." 
#, python-format msgid "%s is already stopped" msgstr "%s уже остановлен" #, python-format msgid "%s is stopped" msgstr "%s остановлен" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Опция --os_auth_url или переменная среды OS_AUTH_URL требуется, если " "включена стратегия идентификации Keystone\n" msgid "A body is not expected with this request." msgstr "В этом запросе не должно быть тела." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Объект определения метаданных с именем %(object_name)s уже существует в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Свойство определения метаданных с именем %(property_name)s уже существует в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Тип ресурса определения метаданных с именем %(resource_type_name)s уже " "существует." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Набор URL для доступа к файлу образа, находящемуся во внешнем хранилище" msgid "Amount of disk space (in GB) required to boot image." msgstr "Объем дисковой памяти (в ГБ), необходимой для загрузки образа." msgid "Amount of ram (in MB) required to boot image." msgstr "Объем оперативной памяти (в МБ), необходимой для загрузки образа." msgid "An identifier for the image" msgstr "Идентификатор образа" msgid "An identifier for the image member (tenantId)" msgstr "Идентификатор участника образа (tenantId)" msgid "An identifier for the owner of this task" msgstr "Идентификатор владельца задачи" msgid "An identifier for the task" msgstr "Идентификатор задачи" msgid "An image file url" msgstr "url файла образа" msgid "An image schema url" msgstr "url схемы образа" msgid "An image self url" msgstr "Собственный url образа" msgid "An import task exception occurred" msgstr "Исключительная ситуация в задаче импорта" msgid "An object with the same identifier already exists." msgstr "Объект с таким идентификатором уже существует." msgid "An object with the same identifier is currently being operated on." msgstr "Объект с таким идентификатором занят в текущей операции." msgid "An object with the specified identifier was not found." msgstr "Объект с указанным идентификатором не найден." msgid "An unknown exception occurred" msgstr "Возникла неизвестная исключительная ситуация" msgid "An unknown task exception occurred" msgstr "Непредвиденная исключительная ситуация" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Атрибут '%(property)s' предназначен только для чтения." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "Атрибут '%(property)s' зарезервирован." #, python-format msgid "Attribute '%s' is read-only." msgstr "Атрибут '%s' предназначен только для чтения." #, python-format msgid "Attribute '%s' is reserved." msgstr "Атрибут '%s' зарезервирован." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "container_format атрибута может быть заменен только для образа, находящегося " "в очереди." msgid "Attribute disk_format can be only replaced for a queued image." 
msgstr "" "disk_format атрибута может быть заменен только для образа, находящегося в " "очереди." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Служба идентификации с URL %(url)s не найдена." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Ошибка идентификации. Возможно, время действия маркера истекло во время " "загрузки файла. Данные образа для %s будут удалены." msgid "Authorization failed." msgstr "Доступ не предоставлен." msgid "Available categories:" msgstr "Доступные категории:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Недопустимый формат фильтра запроса \"%s\". Используйте нотацию DateTime ISO " "8601." #, python-format msgid "Bad header: %(header_name)s" msgstr "Неправильный заголовок: %(header_name)s" msgid "Body expected in request." msgstr "В запросе ожидалось тело." msgid "Cannot be a negative value" msgstr "Значение не может быть отрицательным" msgid "Cannot be a negative value." msgstr "Не может быть отрицательным значением." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "Не удается преобразовать %(key)s '%(value)s' в целое число." msgid "Cannot remove last location in the image." msgstr "Нельзя удалять последнее расположение из образа." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Не удается сохранить данные для образа %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Список расположений не может быть пустым." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Проверка контрольной суммой не выполнена. Кэширование образа '%s' прервано." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Ошибка соединения или неправильный запрос к службе идентификации с URL " "%(url)s." #, python-format msgid "Constructed URL: %s" msgstr "Сформированный URL: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Образ %(image_id)s скачан поврежденным" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Не удалось выполнить связывание с %(host)s:%(port)s в течение 30 секунд" msgid "Could not find OVF file in OVA archive file." msgstr "Не найден файл OVF в файле архива OVA." #, python-format msgid "Could not find metadata object %s" msgstr "Не найден объект метаданных %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Не удалось найти тег метаданных %s" #, python-format msgid "Could not find property %s" msgstr "Не найдено свойство %s" #, python-format msgid "Could not find task %s" msgstr "Задача %s не найдена" #, python-format msgid "Could not update image: %s" msgstr "Не удалось изменить образ: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "В настоящее время пакеты OVA с несколькими дисками не поддерживаются." msgid "Data supplied was not valid." msgstr "Предоставленные данные недопустимы." 
msgid "Date and time of image member creation" msgstr "Дата и время создания участника образа" msgid "Date and time of image registration" msgstr "Дата и время регистрации образа" msgid "Date and time of last modification of image member" msgstr "Дата и время последней модификации участника образа" msgid "Date and time of namespace creation" msgstr "Дата и время создания пространства имен" msgid "Date and time of object creation" msgstr "Дата и время создания объекта" msgid "Date and time of resource type association" msgstr "Дата и время связывания типа ресурса" msgid "Date and time of tag creation" msgstr "Дата и время создания тега" msgid "Date and time of the last image modification" msgstr "Дата и время последнего изменения образа" msgid "Date and time of the last namespace modification" msgstr "Дата и время последнего изменения пространства имен" msgid "Date and time of the last object modification" msgstr "Дата и время последнего изменения объекта" msgid "Date and time of the last resource type association modification" msgstr "Дата и время последнего изменения связи типа ресурса" msgid "Date and time of the last tag modification" msgstr "Дата и время последнего изменения тега" msgid "Datetime when this resource was created" msgstr "Дата и время создания ресурса" msgid "Datetime when this resource was updated" msgstr "Дата и время обновления ресурса" msgid "Datetime when this resource would be subject to removal" msgstr "Дата и время планового удаления ресурса" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "Попытка загрузить образ с превышением квоты отклонена: %s" msgid "Descriptive name for the image" msgstr "Описательное имя образа" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Драйвер %(driver_name)s не удалось правильно настроить. Причина: %(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Ошибка при декодировании запроса. URL или тело запроса содержат символы, " "которые Glance не способен декодировать" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Ошибка при выборке элементов образа %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Ошибка в конфигурации хранилища. Добавление образов в хранилище отключено." msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Элемент должен быть задан в формате: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Состояние должно быть указано в формате: {\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Найти образ для удаления %(image_id)s не удалось" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Не удалось найти тип ресурса %(resourcetype)s для удаления" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "Инициализировать базу данных кэша образов не удалось. 
Ошибка: %s" #, python-format msgid "Failed to read %s from config" msgstr "Прочесть %s из конфигурации не удалось" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Загрузить данные образа %(image_id)s не удалось из-за ошибки HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Загрузить данные образа %(image_id)s не удалось из-за внутренней ошибки: " "%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Файл %(path)s содержит недопустимый базовый файл %(bfile)s, принудительное " "завершение." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Импорты на основе файлов не разрешены. Используйте нелокальный источник " "данных образа." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Запрещенный запрос: пространство имен %s определения метаданных невидимое." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Запрос запрещается, задача %s невидима" msgid "Format of the container" msgstr "Формат контейнера" msgid "Format of the disk" msgstr "Формат диска" #, python-format msgid "Host \"%s\" is not valid." msgstr "Хост \"%s\" недопустим." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Хост и порт \"%s\" недопустимы." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Информационное сообщение для пользователя добавляется только в " "соответствующих случаях (обычно в случае ошибки)" msgid "If true, image will not be deletable." msgstr "Если значение равно true, то образ нельзя будет удалить." msgid "If true, namespace will not be deletable." msgstr "Если true, пространство имен будет неудаляемым." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "Не удается удалить образ %(id)s, так как он используется: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Образ %(image_id)s не найден после загрузки. Возможно, он удален во время " "загрузки: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "Образ %(image_id)s защищен и не может быть удален." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Образ %s не найден после загрузки. Возможно, он был удален во время " "передачи, выполняется очистка переданных фрагментов." #, python-format msgid "Image %s not found." msgstr "Образ %s не найден." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "Размер образа превышает квоту хранилища: %s" msgid "Image id is required." msgstr "Требуется ИД образа." 
#, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" "Превышено предельно допустимое число участников для образа %(id)s: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Изменять состояние %(cur_status)s образа на %(new_status)s не разрешается" #, python-format msgid "Image storage media is full: %s" msgstr "Носитель образов переполнен: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Превышено предельно допустимое число тегов для образа %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Неполадка при передаче образа: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Не найден образ с заданным ИД %(image_id)s" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Неправильная стратегия идентификации, ожидалось \"%(expected)s\", но " "получено \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Неправильный запрос: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Ввод не содержит поле %(key)s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Недостаточные права для доступа к носителю образов: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Недопустимый указатель JSON для этого ресурса: '%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "Недопустимая конфигурация в файле конфигурации glance-swift." msgid "Invalid configuration in property protection file." msgstr "Недопустимая конфигурация в файле защиты свойств." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Недопустимый тип содержимого: %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Недопустимое значение фильтра %s. Нет закрывающей кавычки." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Недопустимое значение фильтра %s. Нет запятой после закрывающей кавычки." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Недопустимое значение фильтра %s. Нет запятой перед открывающей кавычкой." msgid "Invalid location" msgstr "Недопустимое расположение" #, python-format msgid "Invalid location: %s" msgstr "Недопустимое расположение: %s" msgid "Invalid locations" msgstr "Недопустимые расположения" #, python-format msgid "Invalid locations: %s" msgstr "Недопустимые расположения: %s" msgid "Invalid marker format" msgstr "Недопустимый формат маркера" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Недопустимая операция: `%(op)s`. Допускается одна из следующих операций: " "%(available)s." msgid "Invalid position for adding a location." msgstr "Недопустимая позиция для добавления расположения." msgid "Invalid position for removing a location." msgstr "Недопустимая позиция для удаления расположения." msgid "Invalid service catalog json." msgstr "Недопустимый json каталога службы." #, python-format msgid "Invalid sort direction: %s" msgstr "Недопустимое направление сортировки: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Недопустимый ключ сортировки %(sort_key)s. 
Допускается один из следующих " "ключей: %(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Недопустимое значение состояния: %s" #, python-format msgid "Invalid status: %s" msgstr "Недопустимое состояние: %s" #, python-format msgid "Invalid type value: %s" msgstr "Недопустимое значение типа: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Недопустимое обновление. Оно создает пространство имен определения " "метаданных с таким же именем, как у пространства имен %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Недопустимое обновление. Оно создает объект определения метаданных с таким " "же именем, как у объекта %(name)s в пространстве имен %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Недопустимое обновление. Оно создает свойство определения метаданных с таким " "же именем, как у свойства %(name)s в пространстве имен " "%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Неверное значение '%(value)s' параметра '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Недопустимое значение для опции %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Недопустимое значение видимости: %s" msgid "It's not allowed to add locations if locations are invisible." msgstr "Не разрешено добавлять расположения, если они невидимы." msgid "It's not allowed to remove locations if locations are invisible." msgstr "Не разрешено удалять расположения, если они невидимы." msgid "It's not allowed to update locations if locations are invisible." msgstr "Не разрешено обновлять расположения, если они невидимы." msgid "List of strings related to the image" msgstr "Список строк, относящихся к образу" msgid "Malformed JSON in request body." msgstr "Неправильно сформированный JSON в теле запроса." msgid "Maximal age is count of days since epoch." msgstr "Максимальный возраст - число дней с начала эпохи." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Превышено максимальное количество перенаправлений (%(redirects)s)." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Обнаружена копия участника %(member_id)s для образа %(image_id)s" msgid "Member can't be empty" msgstr "Участник не может быть пустым" msgid "Member to be added not specified" msgstr "Добавляемый участник не указан" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Пространство имен %(namespace)s определения метаданных защищено и не может " "быть удалено." 
#, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Не найдено пространство имен определения метаданных для ИД %s" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "Объект %(object_name)s определения метаданных защищен и не может быть удален." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "Не найден объект определения метаданных для ИД %s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "Свойство %(property_name)s определения метаданных защищено и не может быть " "удалено." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "Не найдено свойство определения метаданных для ИД %s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Тип ресурса %(resource_type_name)s определения метаданных является системным " "типом и не может быть удален." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Связь типа ресурса %(resource_type)s определения метаданных защищена и не " "может быть удалена." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Тег %(tag_name)s определения метаданных защищен и не может быть удален." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Не найден тег определения метаданных для ИД %s" #, python-format msgid "Missing required credential: %(required)s" msgstr "Отсутствуют обязательные идентификационные данные: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Несколько соответствий службы 'image' для региона %(region)s. Обычно это " "означает, что регион обязателен, но вы его не указали." #, python-format msgid "No image found with ID %s" msgstr "Образ с ИД %s не найден" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Расположение с ИД %(loc)s из образа %(img)s не найдено" #, python-format msgid "Not allowed to create members for image %s." msgstr "Не разрешено создавать участников для образа %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Запрещено деактивировать образ в состоянии '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Не разрешено удалять участников для образа %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Не разрешено удалять теги для образа %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Запрещено повторно активировать образ в состоянии '%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "Не разрешено изменять участников для образа %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Не разрешено изменять теги для образа %s." 
#, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "Загружать данные для образа %(image_id)s не разрешено: %(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "Число направлений сортировки не совпадает с числом ключей сортировки" msgid "OVA extract is limited to admin" msgstr "Распаковку OVA может выполнить только администратор" msgid "Old and new sorting syntax cannot be combined" msgstr "Прежний и новый синтаксисы сортировки нельзя смешивать" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "Операции \"%s\" требуется участник с именем \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Объекты операции должны содержать в точности один участник с именем \"add\", " "\"remove\" или \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Объекты операции должны содержать только один участник с именем \"add\", " "\"remove\" или \"replace\"." msgid "Operations must be JSON objects." msgstr "Операции должны быть объектами JSON." #, python-format msgid "Original locations is not empty: %s" msgstr "Исходные расположения не пусты: %s" msgid "Owner can't be updated by non admin." msgstr "Обычный пользователь не может изменить владельца." msgid "Owner of the image" msgstr "Владелец образа" msgid "Owner of the namespace." msgstr "Владелец пространства имен." msgid "Param values can't contain 4 byte unicode." msgstr "" "Значения параметров не могут содержать символы в кодировке 4-байтового " "unicode." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Указатель `%s` содержит символ \"~\", не входящий в распознаваемую Esc-" "последовательность." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Указатель `%s` содержит смежный \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Указатель `%s` не содержит допустимого маркера." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Указатель `%s` не начинается с \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Указатель `%s` оканчивается на \"/\"." #, python-format msgid "Port \"%s\" is not valid." msgstr "Порт \"%s\" недопустим." #, python-format msgid "Process %d not running" msgstr "Процесс %d не выполняется" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "Свойства %s должны быть заданы до сохранения данных." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "Свойство %(property_name)s не начинается с ожидаемого префикса связи типа " "ресурса '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Свойство %s уже существует." #, python-format msgid "Property %s does not exist." msgstr "Свойство %s не существует." #, python-format msgid "Property %s may not be removed." msgstr "Свойство %s нельзя удалить." #, python-format msgid "Property %s must be set prior to saving data." msgstr "Свойство %s должно быть задано до сохранения данных." msgid "Property names can't contain 4 byte unicode." msgstr "" "Имена свойств не могут содержать символы в кодировке 4-байтового unicode." 
#, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Предоставленный объект не соответствует схеме '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Указано неподдерживаемое состояние задачи: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Указан неподдерживаемый тип задачи: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Описание пространства имен для пользователя." msgid "Received invalid HTTP redirect." msgstr "Получено недопустимое перенаправление HTTP." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Перенаправляется на %(uri)s для предоставления доступа." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "Реестр настроен неправильно на сервере API. Причина: %(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Перезагрузка %(serv)s не поддерживается" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Перезагрузка %(serv)s (pid %(pid)s) с сигналом (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Удаление устаревшего файла pid %s" msgid "Request body must be a JSON array of operation objects." msgstr "Тело запроса должно быть массивом JSON объектов операции." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Ответ от Keystone не содержит конечной точки Glance." msgid "Scope of image accessibility" msgstr "Область доступности образа" msgid "Scope of namespace accessibility." msgstr "Область доступности пространства имен." #, python-format msgid "Server %(serv)s is stopped" msgstr "Сервер %(serv)s остановлен" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Создать исполнителя сервера не удалось: %(reason)s." msgid "Signature verification failed" msgstr "Проверка подписи не выполнена" msgid "Size of image file in bytes" msgstr "Размер файла образа в байтах" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Некоторые типы ресурсов допускают более одной пары ключ-значение на " "экземпляр. Например, в Cinder разрешены метаданные пользователей и образов " "для томов. Только метаданные свойств образа обрабатываются Nova " "(планирование или драйверы). Это свойство позволяет целевому объекту " "пространства имен устранить неоднозначность." msgid "Sort direction supplied was not valid." msgstr "Указано недопустимое направление сортировки." msgid "Sort key supplied was not valid." msgstr "Задан недопустимый ключ сортировки." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Задает префикс для данного типа ресурсов. Все свойства в пространстве имен " "должны иметь этот префикс при применении к указанному типу ресурсов. Должен " "использоваться разделитель префикса (например, двоеточие :)." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Состояние должно быть \"pending\", \"accepted\" или \"rejected\"." 
msgid "Status not specified" msgstr "Состояние не указано" msgid "Status of the image" msgstr "Состояние образа" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "Изменять состояние %(cur_status)s на %(new_status)s не разрешается" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Остановка %(serv)s (pid %(pid)s) с сигналом (%(sig)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "Поддерживаемые значения атрибута образа 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Поддерживаемые значения атрибута образа 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Повторное порождение подавлено, поскольку %(serv)s был %(rsn)s." msgid "System SIGHUP signal received." msgstr "Получен системный сигнал SIGHUP." #, python-format msgid "Task '%s' is required" msgstr "Требуется задача '%s'" msgid "Task does not exist" msgstr "Задача не существует" msgid "Task failed due to Internal Error" msgstr "Задача не выполнена из-за внутренней ошибки" msgid "Task was not configured properly" msgstr "Задача неправильно настроена" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Задача с указанным ИД %(task_id)s не найдена" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Фильтр \"changes-since\" больше недоступен в v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "Указанный файл CA %s не существует" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "Объект образа %(image_id)s, создаваемый с помощью задачи %(task_id)s, больше " "не находится в допустимом состоянии для дальнейшей обработки." msgid "The Store URI was malformed." msgstr "URI хранилища неправильно сформирован." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Указанный файл сертификата %s не существует" msgid "The current status of this task" msgstr "Текущее состояние задачи" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "Устройство, на котором размещен каталог %(image_cache_dir)s кэша образов, не " "поддерживает xattr. По-видимому, вам нужно отредактировать fstab, добавив " "опцию user_xattr в соответствующую строку для устройства, на котором " "размещен каталог кэша." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "Заданный uri недопустим. Укажите допустимый uri из следующего списка " "поддерживаемых uri %(supported)s" #, python-format msgid "The incoming image is too large: %s" msgstr "Чересчур большой размер входящего образа: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Указанный файл ключа %s не существует" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных расположений образа. 
Указанное " "число: %(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных участников данного образа. " "Указанное число: %(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных свойств образа. Указанное число: " "%(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных тегов образа. Указанное число: " "%(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "Расположение %(location)s уже существует" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Данные о расположении содержат недопустимый ИД: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Определение метаданных %(record_type)s с именем %(record_name)s не удалено. " "Другие записи все еще ссылаются на него." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Пространство имен %(namespace_name)s определения метаданных уже существует." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "Объект определения метаданных с именем %(object_name)s не найден в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "Свойство определения метаданных с именем %(property_name)s не найдено в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Связь типа ресурса определения метаданных для типа ресурса " "%(resource_type_name)s и пространства имен %(namespace_name)s уже существует." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "Связь типа ресурса определения метаданных для типа ресурса " "%(resource_type_name)s и пространства имен %(namespace_name)s не найдена." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Тип ресурса определения метаданных с именем %(resource_type_name)s не найден." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Тег определения метаданных с именем %(name)s не найден в пространстве имен " "%(namespace_name)s." msgid "The parameters required by task, JSON blob" msgstr "Параметры, обязательные для задачи, JSON blob" msgid "The provided image is too large." msgstr "Предоставленный образ слишком велик." msgid "The request returned 500 Internal Server Error." msgstr "Запрос возвратил ошибку 500 - Внутренняя ошибка сервера." 
msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "Запрос возвратил ошибку 503 - Служба недоступна. Как правило, это происходит " "при перегруженности службы или другом временном сбое." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "Запрос возвратил ошибку 302 - Множественный выбор. Как правило, это " "означает, что вы не включили индикатор версии в URI запроса.\n" "\n" "Возвращенное тело ответа:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Запрос возвратил ошибку 413 - Сущность запроса слишком велика. Как правило, " "это означает, что нарушено ограничение на скорость или порог квоты.\n" "\n" "Тело ответа:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Запрос возвратил непредвиденное состояние: %(status)s.\n" "\n" "Тело ответа:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "Запрошенный образ деактивирован. Загрузка данных образа запрещена." msgid "The result of current task, JSON blob" msgstr "Результат текущей задачи, JSON blob" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "Объем данных %(image_size)s превышает допустимый максимум. Остаток: " "%(remaining)s байт." #, python-format msgid "The specified member %s could not be found" msgstr "Указанный участник %s не найден" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Указанный объект метаданных %s не найден" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Не удалось найти указанный тег метаданных %s" #, python-format msgid "The specified namespace %s could not be found" msgstr "Указанное пространство имен %s не найдено" #, python-format msgid "The specified property %s could not be found" msgstr "Указанное свойство %s не найдено" #, python-format msgid "The specified resource type %s could not be found " msgstr "Указанный тип ресурса %s не найден " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Состояние расположения удаленного образа может быть равно только " "'pending_delete' или 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Состояние расположения удаленного образа может быть равно только " "'pending_delete' или 'deleted'." msgid "The status of this image member" msgstr "Состояние этого участника образа" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "Целевой участник %(member_id)s уже связан с образом %(image_id)s." msgid "The type of task represented by this content" msgstr "Тип задачи, представленной этим содержимым" msgid "The unique namespace text." msgstr "Уникальный текст пространства имен." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Имя пространства имен для пользователя. Используется в пользовательском " "интерфейсе." 
msgid "There was an error configuring the client." msgstr "При настройке клиента произошла ошибка." msgid "There was an error connecting to a server" msgstr "При подключении к серверу произошла ошибка" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Эта операция в настоящее время не разрешена для задач Glance. Они " "автоматически удаляются после достижения срока, указанного в их свойстве " "expires_at." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Время (ч) существования задачи после успешного выполнения или завершения с " "ошибкой" msgid "Too few arguments." msgstr "Недостаточно аргументов." msgid "URL to access the image file kept in external store" msgstr "URL для доступа к файлу образа, находящемуся во внешнем хранилище" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Не удается создать файл pid %(pid)s. Запущен без прав доступа root?\n" "Выполняется возврат к временному файлу; остановить службу %(service)s можно " "командой:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Не удается отфильтровать по неизвестному оператору '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "Отфильтровать по диапазону с нечисловым значением невозможно." msgid "Unable to filter on a unknown operator." msgstr "Не удается отфильтровать с использованием неизвестного оператора." msgid "Unable to filter using the specified operator." msgstr "Не удается отфильтровать с использованием указанного оператора." msgid "Unable to filter using the specified range." msgstr "Отфильтровать согласно указанному диапазону невозможно." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "'%s' не найден в изменении схемы JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Не удалось найти `op` в изменении схемы JSON. Допускается одно из следующих " "значений: %(available)s." msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Не удается увеличить предельное значение для дескриптора файлов. Запущен без " "прав доступа root?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Невозможно загрузить %(app_name)s из файла конфигурации %(conf_file)s.\n" "Ошибка: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Не удалось загрузить схему: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Не удается найти файл конфигурации paste для %s." msgid "Unexpected body type. Expected list/dict." msgstr "Непредвиденный тип тела. Ожидался список или словарь." 
#, python-format msgid "Unexpected response: %s" msgstr "Непредвиденный ответ: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Неизвестная стратегия идентификации: '%s'" #, python-format msgid "Unknown command: %s" msgstr "Неизвестная команда: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Нераспознанная версия черновика схемы JSON" msgid "Virtual size of image in bytes" msgstr "Виртуальный размер образа в байтах" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Система ожидала завершения pid %(pid)s (%(file)s) в течение 15 секунд; " "прекращение попыток" msgid "You are not authenticated." msgstr "Вы не прошли идентификацию." msgid "You are not authorized to complete this action." msgstr "У вас нет прав на выполнение этого действия." #, python-format msgid "You are not authorized to lookup image %s." msgstr "У вас нет прав доступа для поиска образа %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "У вас нет прав доступа для поиска участников образа %s." msgid "You are not permitted to create image members for the image." msgstr "Вам не разрешено создавать участников для данного образа." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Вам не разрешено создавать образы, принадлежащие '%s'." msgid "You do not own this image" msgstr "Этот образ вам не принадлежит" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Вы выбрали применение SSL в соединении и предоставили сертификат, однако вам " "не удалось ни предоставить параметр key_file, ни задать переменную среды " "GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Вы выбрали применение SSL в соединении и предоставили ключ, однако вам не " "удалось ни предоставить параметр cert_file, ни задать переменную среды " "GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "В __init__() получен непредвиденный именованный аргумент '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "не удается выполнить переход от %(current)s к %(next)s при обновлении " "(требуется from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "настраиваемые свойства (%(props)s) конфликтуют с базовыми свойствами" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Для этой платформы отсутствуют центры обработки событий poll и selects " "библиотеки eventlet" msgid "limit param must be an integer" msgstr "Параметр limit должен быть целым числом" msgid "limit param must be positive" msgstr "Параметр limit должен быть положительным" msgid "md5 hash of image contents." msgstr "Хэш md5 содержимого образа." 
#, python-format msgid "new_image() got unexpected keywords %s" msgstr "В new_image() получены непредвиденные ключевые слова %s" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "не удается запустить %(serv)s. Ошибка: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "Слишком большая длина x-openstack-request-id, максимальная длина: %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/tr_TR/0000775000175000017500000000000000000000000016600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/tr_TR/LC_MESSAGES/0000775000175000017500000000000000000000000020365 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/tr_TR/LC_MESSAGES/glance.po0000664000175000017500000012657000000000000022171 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andreas Jaeger , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:22+0000\n" "Last-Translator: Copied by Zanata \n" "Language: tr_TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Son rpc çağrısında %(cls)s istisnası oluştu: %(val)s" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) çalıştırılıyor..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s çalışıyor görünüyor: %(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_type)s görev türündeki %(task_id)s düzgün bir şekilde " "yapılandırılamadı. Dosya sistem deposuna yüklenemedi" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_type)s görev türündeki %(task_id)s düzgün bir şekilde " "yapılandırılamadı. Eksik çalışma dizini: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(conf)s ile %(verb)sing %(serv)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Lütfen istemcinin bir IPv4, IPv6 adresi, makine adı ya da FQDN olduğu bir " "istemci:bağlantı noktası çifti belirtin. Eğer IPv6 kullanılırsa, bağlantı " "noktasından ayrı parantez içine alın (örneğin, \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s 4 bayt unicode karakterler içeremez." 
#, python-format msgid "%s is already stopped" msgstr "%s zaten durdurulmuş" #, python-format msgid "%s is stopped" msgstr "%s durduruldu" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "--os_auth_url seçeneği ya da OS_AUTH_URL ortam değişkeni, keystone kimlik " "doğrulama stratejisi etkinken gereklidir\n" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ad=%(object_name)s ile bir metadata tanım nesnesi ad alanında=" "%(namespace_name)s zaten var." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ad=%(property_name)s ile bir metadata tanım özelliği ad alanında=" "%(namespace_name)s zaten mevcut." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Ad=%(resource_type_name)s ile bir metadata tanım kaynak-türü zaten mevcut." msgid "A set of URLs to access the image file kept in external store" msgstr "Harici depoda tutulan imaj dosyasına erişilecek URL kümesi" msgid "Amount of disk space (in GB) required to boot image." msgstr "İmajı ön yüklemek için gereken disk alanı miktarı (GB olarak)." msgid "Amount of ram (in MB) required to boot image." msgstr "İmaj ön yüklemesi için gereken (MB olarak) bellek miktarı." msgid "An identifier for the image" msgstr "İmaj için bir tanımlayıcı" msgid "An identifier for the image member (tenantId)" msgstr "İmaj üyesi için bir tanımlayıcı (tenantId)" msgid "An identifier for the owner of this task" msgstr "Görevin sahibi için bir tanımlayıcı" msgid "An identifier for the task" msgstr "Görev için bir tanımlayıcı" msgid "An object with the same identifier already exists." msgstr "Aynı tanımlayıcı ile bir nesne zaten mevcut." msgid "An object with the same identifier is currently being operated on." msgstr "Aynı tanımlayıcıya sahip bir nesne şu anda işleniyor." msgid "An object with the specified identifier was not found." msgstr "Belirtilen tanımlayıcı ile bir nesne bulunamadı." msgid "An unknown exception occurred" msgstr "Bilinmeyen olağandışı bir durum oluştu" msgid "An unknown task exception occurred" msgstr "Bilinmeyen bir görev olağandışı durumu oluştu" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "'%(property)s' özniteliği salt okunurdur." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "'%(property)s' özniteliği ayrılmıştır." #, python-format msgid "Attribute '%s' is read-only." msgstr "'%s' özniteliği salt okunurdur." #, python-format msgid "Attribute '%s' is reserved." msgstr "'%s' özniteliği ayrılmıştır." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "container_format özniteliği sadece kuyruğa alınmış bir imaj için " "değiştirilebilir." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "disk_format özniteliği sadece kuyruğa alınmış bir imaj için değiştirilebilir." #, python-format msgid "Auth service at URL %(url)s not found." msgstr "%(url)s URL'inde kimlik doğrulama servisi bulunamadı." msgid "Authorization failed." msgstr "Yetkilendirme başarısız oldu." msgid "Available categories:" msgstr "Kullanılabilir kategoriler:" #, python-format msgid "Bad header: %(header_name)s" msgstr "Kötü başlık: %(header_name)s" msgid "Body expected in request." 
msgstr "İstekte gövde bekleniyor." msgid "Cannot be a negative value" msgstr "Negatif bir değer olamaz" msgid "Cannot be a negative value." msgstr "Negatif bir değer olamaz." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "%(key)s '%(value)s' imaj değeri bir tam sayıya dönüştürülemez." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "%(image_id)s imajı için veri kaydedilemiyor: %(error)s" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Sağlama doğrulama başarısız oldu. '%s' imajını önbelleğe alma işlemi " "durduruldu." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "%(url)s URL'indeki kimlik doğrulama servisine bağlantı hatası/hatalı istek." #, python-format msgid "Constructed URL: %s" msgstr "URL inşa edildi: %s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "%(image_id)s imajı için bozuk imaj indirme" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "30 saniyelik denemeden sonra %(host)s:%(port)s bağlanamadı" #, python-format msgid "Could not find metadata object %s" msgstr "Metadata nesnesi %s bulunamadı" #, python-format msgid "Could not find metadata tag %s" msgstr "%s metadata etiketi bulunamadı" #, python-format msgid "Could not find property %s" msgstr "%s özelliği bulunamadı" #, python-format msgid "Could not find task %s" msgstr "%s görevi bulunamadı" #, python-format msgid "Could not update image: %s" msgstr "İmaj güncellenemiyor: %s" msgid "Data supplied was not valid." msgstr "Sağlanan veri geçersizdir." msgid "Date and time of image member creation" msgstr "İmaj üyesi oluşturma tarih ve saati" msgid "Date and time of last modification of image member" msgstr "İmaj üyesi son değişiklik tarih ve saati" msgid "Datetime when this resource was created" msgstr "Bu kaynak oluşturulduğundaki tarih saat" msgid "Datetime when this resource was updated" msgstr "Bu kaynak güncellendiğindeki tarih saat" msgid "Datetime when this resource would be subject to removal" msgstr "Bu kaynağın kaldırılacağı tarih zaman" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "İmaj yükleme girişimi kotayı aştığından dolayı reddediliyor: %s" msgid "Descriptive name for the image" msgstr "İmaj için açıklayıcı ad" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "%(driver_name)s sürücüsü düzgün bir şekilde yapılandırılamadı. Nedeni: " "%(reason)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Depolama yapılandırmasında hata. Depolamak için imaj ekleme devre dışıdır." #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Silinecek %(image_id)s imajını bulma işlemi başarısız oldu" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Silinecek %(resourcetype)s kaynak türü bulma işlemi başarısız oldu" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "İmaj önbellek veritabanı başlatılamadı. 
Alınan hata: %s" #, python-format msgid "Failed to read %s from config" msgstr "Yapılandırmadan %s okunamadı" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "HTTP hatası nedeniyle %(image_id)s imajı için imaj verisi yüklenemedi: " "%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Dahili hata nedeniyle %(image_id)s imajı için imaj verisi yüklenemedi: " "%(error)s" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Dosya tabanlı içeri aktarmlara izin verilmez. Lütfen imaj verilerinin yerel " "olmayan bir kaynağını kullanın." #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "Yasak istek, üstveri tanım ad alanı=%s görünür değil." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Yasak istek, %s görevi görünür değil" msgid "Format of the container" msgstr "Kabın biçimi" msgid "Format of the disk" msgstr "Diskin biçimi" #, python-format msgid "Host \"%s\" is not valid." msgstr "İstemci \"%s\" geçersizdir." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "İstemci ve bağlantı noktası \"%s\" geçersizdir." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Okunabilir bilgilendirme iletisi sadece uygun olduğunda (genellikle " "başarısızlıkta) dahildir" msgid "If true, image will not be deletable." msgstr "Eğer seçiliyse, imaj silinemeyecektir." msgid "If true, namespace will not be deletable." msgstr "Eğer seçiliyse, ad alanı silinemeyecektir." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "%(id)s imajı kullanımda olduğundan dolayı silinemedi: %(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "%(image_id)s imajı yüklemeden sonra bulunamadı. İmaj yükleme sırasında " "silinmiş olabilir: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "%(image_id)s imajı korumalıdır ve silinemez." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "%s imajı yüklendikten sonra bulunamadı. İmaj yükleme sırasında silinmiş, " "yüklenen parçalar temizlenmiş olabilir." #, python-format msgid "Image %s not found." msgstr "%s imajı bulunamadı." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "İmaj depolama kotasını aşar: %s" msgid "Image id is required." msgstr "İmaj kimliği gereklidir." 
#, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "%(id)s imajı için üye sınırı aşıldı: %(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s durumundan %(new_status)s durumuna imaj durum geçişine izin " "verilmez" #, python-format msgid "Image storage media is full: %s" msgstr "İmaj depolama ortamı dolu: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "%(id)s imajı için etiket sınırı aşıldı: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "İmaj yükleme sorunu: %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Verilen %(image_id)s ile imaj bulunamadı" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Hatalı yetki stratejisi, beklenen değer, \"%(expected)s\" ancak alınan " "değer, \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Hatalı istek: %s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Girdi '%(key)s' alanı içermez" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "İmaj depolama ortamında yetersiz izinler: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Bu kaynak için geçersiz JSON işaretçisi: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift yapılandırma dosyasında geçersiz yapılandırma." msgid "Invalid configuration in property protection file." msgstr "Özellik koruma dosyasında geçersiz yapılandırma." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Geçersiz içerik türü %(content_type)s" msgid "Invalid location" msgstr "Geçersiz konum" #, python-format msgid "Invalid location: %s" msgstr "Geçersiz konum: %s" msgid "Invalid locations" msgstr "Geçersiz konumlar" #, python-format msgid "Invalid locations: %s" msgstr "Geçersiz konumlar: %s" msgid "Invalid marker format" msgstr "Geçersiz işaretçi biçimi" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Geçersiz işlem: `%(op)s`. Şu seçeneklerden biri olmalıdır: %(available)s." msgid "Invalid position for adding a location." msgstr "Yer eklemek için geçersiz konum." msgid "Invalid position for removing a location." msgstr "Yer kaldırmak için geçersiz konum." msgid "Invalid service catalog json." msgstr "Geçersiz json servis kataloğu." #, python-format msgid "Invalid sort direction: %s" msgstr "Geçersiz sıralama yönü: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Geçersiz sıralama anahtarı: %(sort_key)s. Şu seçeneklerden biri olmalıdır: " "%(available)s." #, python-format msgid "Invalid status value: %s" msgstr "Geçersiz durum değeri: %s" #, python-format msgid "Invalid status: %s" msgstr "Geçersiz durum: %s" #, python-format msgid "Invalid type value: %s" msgstr "Geçersiz tür değeri: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Geçersiz güncelleme. Aynı %s adıyla çift metadata tanım ad alanı ile " "sonuçlanır" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Geçersiz güncelleme. 
Ad alanı=%(namespace_name)s içinde aynı ad=%(name)s " "ile çift metadata tanım nesnesi olmasına neden olacaktır." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Geçersiz güncelleme. Ad alanı=%(namespace_name)s içinde aynı ad=%(name)s ile " "çift metadata tanım özelliği olmasına neden olacaktır." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" "'%(param)s' parametresi için '%(value)s' geçersiz değeri: %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "%(option)s seçeneği için geçersiz değer: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Geçersiz görünürlük değeri: %s" msgid "List of strings related to the image" msgstr "İmaj ile ilgili karakter dizilerinin listesi" msgid "Malformed JSON in request body." msgstr "İstek gövdesinde bozuk JSON." #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Yeniden yönlendirmelerin sınırı (%(redirects)s) aşıldı." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Üye %(member_id)s %(image_id)s imajı için çoğaltıldı" msgid "Member can't be empty" msgstr "Üye boş olamaz" msgid "Member to be added not specified" msgstr "Eklenecek üye belirtilmemiş" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "Metadata tanım ad alanı %(namespace)s korumalıdır ve silinemez." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "id=%s için metadata tanım ad alanı bulunamadı" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "Metadata tanım nesnesi %(object_name)s korumalıdır ve silinemez." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "id=%s için metadata tanım nesnesi bulunamadı" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "Metadata tanım özelliği %(property_name)s korumalıdır ve silinemez." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "id=%s için metadata tanım özelliği bulunamadı" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "%(resource_type_name)s metadata tanım kaynak-türü sınıflanmış bir sistem " "türüdür ve silinemez." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Metadata tanım kaynak-tür-ilişkisi %(resource_type)s korumalıdır ve " "silinemez." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "Metadata tanım etiketi %(tag_name)s korumalıdır ve silinemez." 
#, python-format msgid "Metadata definition tag not found for id=%s" msgstr "id=%s için metadata tanım etiketi bulunamadı" #, python-format msgid "Missing required credential: %(required)s" msgstr "Gerekli olan kimlik eksik: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "%(region)s bölgesi için birden fazla 'image' servisi eşleşir. Bu genellikle, " "bir bölgenin gerekli olduğu ve sağlamadığınız anlamına gelir." #, python-format msgid "No image found with ID %s" msgstr "%s bilgileri ile hiçbir imaj bulunamadı" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "%(img)s imajından %(loc)s bilgisi ile hiçbir konum bulunamadı" #, python-format msgid "Not allowed to create members for image %s." msgstr "%s imajı için üye oluşturulmasına izin verilmedi." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "'%s' durumundaki imajın etkinliğini kaldırmaya izin verilmez" #, python-format msgid "Not allowed to delete members for image %s." msgstr "%s imajı için üyelerin silinmesine izin verilmedi." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "%s imajı için etiketlerin silinmesine izin verilmedi." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "'%s' durumundaki imajı yeniden etkinleştirmeye izin verilmez" #, python-format msgid "Not allowed to update members for image %s." msgstr "%s imajı için üyelerin güncellenmesine izin verilmedi." #, python-format msgid "Not allowed to update tags for image %s." msgstr "%s imajı için etiketlerin güncellenmesine izin verilmez." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "%(image_id)s imajı için imaj verisi yüklenmesine izin verilmedi: %(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "Sıralama dizinlerinin sayısı, sıralama anahtarlarının sayısıyla eşleşmez" msgid "Old and new sorting syntax cannot be combined" msgstr "Eski ve yeni sıralama sözdizimi birleştirilemez" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "\"%s\" işlemi \"value\" olarak adlandırılan bir üye ister." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "İşlem nesneleri \"add\", \"remove\" ya da \"replace\" olarak adlandırılan " "tam olarak bir üye içermelidir." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "İşlem nesneleri, \"add\", \"remove\" ya da \"replace\" olarak adlandırılan " "sadece bir üye içermelidir." msgid "Operations must be JSON objects." msgstr "İşlemler JSON nesnesi olmalıdır." #, python-format msgid "Original locations is not empty: %s" msgstr "Özgün konumlar boş değil: %s" msgid "Owner of the image" msgstr "İmajın sahibi" msgid "Owner of the namespace." msgstr "Ad alanı sahibi." msgid "Param values can't contain 4 byte unicode." msgstr "Param değerleri 4 bayt unicode içeremez." #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "`%s` işaretçisi tanınan bir kaçış dizisinin parçası olmayan \"~\" " "içerir." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "`%s` işaretçisi bitişik \"/\" içerir." #, python-format msgid "Pointer `%s` does not contains valid token." 
msgstr "`%s` işaretçisi geçerli jeton içermez." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "`%s` işaretçisi \"/\" ile başlamaz." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "`%s` işaretçisi \"/\" ile sonlanır." #, python-format msgid "Port \"%s\" is not valid." msgstr "Bağlantı noktası \"%s\" geçersizdir." #, python-format msgid "Process %d not running" msgstr "%d süreci çalışmıyor" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "%s özellikleri veri kaydetmeden önce ayarlanmış olmalıdır." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "%(property_name)s özelliği beklenen kaynak tür ilişkilendirme ön eki " "'%(prefix)s' ile başlamaz." #, python-format msgid "Property %s already present." msgstr "Özellik %s zaten mevcut." #, python-format msgid "Property %s does not exist." msgstr "Özellik %s mevcut değil." #, python-format msgid "Property %s may not be removed." msgstr "Özellik %s kaldırılamayabilir." #, python-format msgid "Property %s must be set prior to saving data." msgstr "%s özelliği veri kaydetmeden önce ayarlanmış olmalıdır." msgid "Property names can't contain 4 byte unicode." msgstr "Özellik adları 4 bayt unicode içeremez." #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Sağlanan nesne '%(schema)s' şeması ile eşleşmez: %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Sağlanan görev durumu desteklenmiyor: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Sağlanan görev türü desteklenmiyor: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Ad alanı için kullanıcı dostu bir açıklama sağlar." msgid "Received invalid HTTP redirect." msgstr "Geçersiz HTTP yeniden yönlendirme isteği alındı." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Yetkilendirme için %(uri)s adresine yeniden yönlendiriliyor." #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Kayıt defteri API sunucusunda doğru bir şekilde yapılandırılamadı. Nedeni: " "%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "%(serv)s yeniden yükleme desteklenmiyor" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "(%(sig)s) sinyali ile %(serv)s (pid %(pid)s) yeniden yükleniyor" #, python-format msgid "Removing stale pid file %s" msgstr "Eski pid dosyası %s kaldırılıyor" msgid "Request body must be a JSON array of operation objects." msgstr "İstek gövdesi işlem nesnelerinin bir JSON dizisi olmalıdır." msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone yanıtı bir Glance uç noktası içermiyor." msgid "Scope of image accessibility" msgstr "İmaj erişilebilirlik kapsamı" msgid "Scope of namespace accessibility." msgstr "Ad alanı erişebilirlik kapsamı." #, python-format msgid "Server %(serv)s is stopped" msgstr "Sunucu %(serv)s durduruldu" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Sunucu işçisi oluşturma işlemi başarısız oldu: %(reason)s." msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). 
This " "property allows a namespace target to remove the ambiguity." msgstr "" "Bazı kaynak türleri her sunucu başına birden fazla anahtar / değer çiftine " "izin verir. Örneğin, Cinder mantıksal sürücü üzerinde kullanıcı ve imaj " "metadatalarına izin verir. Sadece imaj özellikleri metadataları Nova ile " "değerlendirilir (zamanlama ya da sürücüler). Bu özellik belirsizliği " "kaldırmak için bir ad alanı hedefine olanak sağlar." msgid "Sort direction supplied was not valid." msgstr "Sağlanan sıralama yönü geçersizdir." msgid "Sort key supplied was not valid." msgstr "Sağlanan sıralama anahtarı geçersizdir." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Verilen kaynak türü için kullanılacak öneki belirtir. Ad alanındaki her " "özellik belirtilen kaynak türüne uygulanırken önek eklenmelidir. Önek " "ayıracı içermelidir (örneğin; :)." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Durum \"pending\", \"accepted\" ya da \"rejected\" olmalıdır." msgid "Status not specified" msgstr "Durum belirtilmemiş" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s mevcut durumundan %(new_status)s yeni duruma geçişe izin " "verilmez" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "(%(sig)s) sinyali ile %(serv)s (pid %(pid)s) durduruluyor" msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' imaj özniteliği için desteklenen değerler" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' imaj özniteliği için desteklenen değerler" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "%(serv)s %(rsn)s olduğundan yeniden başlatma engellendi." msgid "System SIGHUP signal received." msgstr "Sistem SIGHUP sinyali aldı." #, python-format msgid "Task '%s' is required" msgstr "'%s' görevi gereklidir" msgid "Task does not exist" msgstr "Görev mevcut değil" msgid "Task failed due to Internal Error" msgstr "Görev Dahili Hata nedeniyle başarısız oldu" msgid "Task was not configured properly" msgstr "Görev düzgün bir şekilde yapılandırılmadı" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Verilen %(task_id)s ile görev bulunamadı" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "\"changes-since\" süzgeci v2 sürümünde artık mevcut değil." #, python-format msgid "The CA file you specified %s does not exist" msgstr "Belirtilen %s CA dosyası mevcut değil" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "%(task_id)s görevi ile oluşturulan %(image_id)s imaj nesnesi, artık ileri " "işlem için geçerli durumda değildir." msgid "The Store URI was malformed." msgstr "Depo URI'si bozulmuş." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Belirtilen %s sertifika dosyası mevcut değil" msgid "The current status of this task" msgstr "Görevin şu anki durumu" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. 
It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "İmaj önbellek dizininin %(image_cache_dir)s yer aldığı aygıt xattr " "desteklemiyor. Önbellek dizini içeren aygıt için fstab düzenlemeniz ve uygun " "satıra user_xattr seçeneği eklemeniz gerekebilir." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "Verilen uri geçersizdir. Lütfen, desteklenen uri listesinden %(supported)s " "geçerli bir uri belirtin" #, python-format msgid "The incoming image is too large: %s" msgstr "Gelen imaj çok büyük: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Belirttiğiniz %s anahtar dosyası mevcut değil" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "İzin verilen imaj konumlarının sayı sınırı aşıldı. Denenen: %(attempted)s, " "Azami: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Bu imaj için izin verilen imaj üye sınırı aşıldı. Denenen: %(attempted)s, En " "fazla: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "İzin verilen imaj özelliklerinin sayı sınırı aşıldı. Denenen: %(attempted)s, " "Azami: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "İzin verilen imaj etiketlerinin sayı sınırı aşıldı. Denenen: %(attempted)s, " "Azami: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "%(location)s konumu zaten mevcut" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Konum verisi geçersiz bir kimliğe sahip: %d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Ad=%(record_name)s ile metadata tanımı %(record_type)s silinmedi. " "Diğer kayıtlar hala onu gösteriyor." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "Metadata tanım ad alanı=%(namespace_name)s zaten mevcut." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "Ad=%(object_name)s ile metadata tanım nesnesi ad alanında=%(namespace_name)s " "bulunamadı." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "Ad=%(property_name)s ile metadata tanım özelliği ad alanında=" "%(namespace_name)s bulunamadı." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Ad alanına=%(namespace_name)s kaynak türünün=%(resource_type_name)s metadata " "tanım kaynak tür ilişkisi zaten mevcut." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." 
msgstr "" "Kaynak türünün=%(resource_type_name)s ad alanında=%(namespace_name)s, " "metadata tanım kaynak-tür ilişkisi bulunamadı." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "Ad=%(resource_type_name)s ile metadata tanım kaynak-türü bulunamadı." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Ad=%(name)s ile metadata tanım etiketi ad alanında=%(namespace_name)s " "bulunamadı." msgid "The parameters required by task, JSON blob" msgstr "JSON blob, görev tarafından istenen parametreler" msgid "The provided image is too large." msgstr "Sağlanan imaj çok büyük." msgid "The request returned 500 Internal Server Error." msgstr "İstek 500 İç Sunucu Hatası döndürdü." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "İstek 503 Hizmet Kullanılamıyor kodu döndürdü. Bu genellikle, hizmetin aşırı " "yük altında olduğu ya da geçici kesintiler oluştuğu anlamına gelir." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "İstek 302 Çok Seçenek kodu döndürdü. Bu genellikle, istek URI'sinin bir " "sürüm göstergesi içermediği anlamına gelir.\n" "\n" "Dönen yanıtın gövdesi:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "İstek 413 Girilen Veri Çok Büyük kodu döndürdü. Bu genellikle, hız " "sınırlayıcı ya da kota eşiği ihlali anlamına gelir.\n" "\n" "Yanıt gövdesi:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "İstek beklenmeyen bir durum döndürdü: %(status)s.\n" "\n" "Yanıt gövdesi:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "İstenen imaj devrede değil. İmaj verisi indirmek yasak." msgid "The result of current task, JSON blob" msgstr "Şu anki görevin sonucu, JSON blob" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "%(image_size)s veri boyutu sınırı aşacak. 
%(remaining)s bayt kaldı." #, python-format msgid "The specified member %s could not be found" msgstr "Belirtilen üye %s bulunamadı" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Belirtilen metadata nesnesi %s bulunamadı" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Belirtilen metadata etiketi %s bulunamadı" #, python-format msgid "The specified namespace %s could not be found" msgstr "Belirtilen ad alanı %s bulunamadı" #, python-format msgid "The specified property %s could not be found" msgstr "Belirtilen özellik %s bulunamadı" #, python-format msgid "The specified resource type %s could not be found " msgstr "Belirtilen kaynak türü %s bulunamadı " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Silinen imaj konumunun durumu sadece 'pending_delete' ya da 'deleted' olarak " "ayarlanabilir" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Silinen imaj konum durumu sadece 'pending_delete' ya da 'deleted' olarak " "ayarlanabilir." msgid "The status of this image member" msgstr "Bu imaj üyesinin durumu" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "Hedef üye %(member_id)s, %(image_id)s imajı ile zaten ilişkilendirilmiştir." msgid "The type of task represented by this content" msgstr "Bu içerikle temsil edilen görev türü" msgid "The unique namespace text." msgstr "Eşsiz ad alanı metni." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Kullanıcı dostu ad alanı adı. Eğer mevcut ise, kullanıcı arayüzü tarafından " "kullanılır." msgid "There was an error configuring the client." msgstr "İstemci yapılandırılırken bir hata meydana geldi." msgid "There was an error connecting to a server" msgstr "Sunucuya bağlanırken bir hata meydana geldi" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Şu anda Glance Görevleri üzerinde bu işleme izin verilmiyor. Onlar " "expires_at özelliğine göre süreleri dolduktan sonra otomatik silinirler." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Bir görevin başarılı ya da başarısız olarak sonuçlanmasından sonra saat " "olarak yaşayacağı süre" msgid "Too few arguments." msgstr "Çok az argüman." #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Pid dosyası %(pid)s oluşturulamadı. Root olmadan mı çalıştırılıyor?\n" "Geçici bir dosyaya geri dönülüyor, şu komutları kullanarak %(service)s " "servisini durdurabilirsiniz:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgid "Unable to filter on a range with a non-numeric value." msgstr "Sayısal olmayan değer ile bir aralıkta süzme yapılamadı." msgid "Unable to filter using the specified range." msgstr "Belirtilen aralık kullanılarak süzme yapılamadı." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "JSON Şema değişikliğinde '%s' bulunamadı" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "JSON Şema değişikliğinde `op` bulunamadı. Şu seçeneklerden biri olmalıdır: " "%(available)s."
msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "Dosya tanıtıcı sınır arttırılamadı. Root olmadan çalıştırılsın mı?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "%(conf_file)s yapılandırma dosyasından %(app_name)s uygulaması yüklenemedi.\n" "Alınan: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Şema yüklenemedi: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "%s için yapıştırma yapılandırma dosyası yerleştirilemedi." #, python-format msgid "Unexpected response: %s" msgstr "Beklenmeyen yanıt: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Bilinmeyen kimlik doğrulama stratejisi '%s'" #, python-format msgid "Unknown command: %s" msgstr "Bilinmeyen komut: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Bilinmeyen sıralama yönü, 'desc' or 'asc' olmalıdır" msgid "Unrecognized JSON Schema draft version" msgstr "Tanınmayan JSON Şeması taslak sürümü" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "%(pid)s (%(file)s) pid'i öldürmek için 15 saniye beklendi; vazgeçiliyor" msgid "You are not authenticated." msgstr "Kimliğiniz doğrulanamadı." msgid "You are not authorized to complete this action." msgstr "Bu eylemi tamamlamak için yetkili değilsiniz." msgid "You are not permitted to create image members for the image." msgstr "İmaj için üye oluşturma izniniz yok." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "'%s''ye ait imaj oluşturma izniniz yok." msgid "You do not own this image" msgstr "Bu imajın sahibi değilsiniz" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Bağlanırken SSL kullanmayı seçtiniz ve bir sertifika sağladınız, ancak ya " "key_file parametresi sağlamayı ya da GLANCE_CLIENT_KEY_FILE değişkeni " "ayarlama işlemini başaramadınız." msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Bağlanırken SSL kullanmayı seçtiniz ve bir anahtar sağladınız, ancak ya " "cert_file parametresi sağlamayı ya da GLANCE_CLIENT_CERT_FILE değişkeni " "ayarlama işlemini başaramadınız." 
msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() beklenmeyen anahtar sözcük değişkeni '%s' aldı" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "güncellemede (istenen from_state=%(from)s), %(current)s mevcut durumundan " "%(next)s sonrakine geçiş olamaz " #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "özel özellikler (%(props)s) temel özellikler ile çatışır" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "bu platformda eventlet 'poll' ya da 'selects' havuzları kullanılabilirdir" msgid "limit param must be an integer" msgstr "Sınır parametresi tam sayı olmak zorunda" msgid "limit param must be positive" msgstr "Sınır parametresi pozitif olmak zorunda" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() beklenmeyen anahtar sözcük %s aldı" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s başlatılamadı. Alınan hata: %(e)s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7782936 glance-29.0.0/glance/locale/zh_CN/0000775000175000017500000000000000000000000016547 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/zh_CN/LC_MESSAGES/0000775000175000017500000000000000000000000020334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/zh_CN/LC_MESSAGES/glance.po0000664000175000017500000014040400000000000022130 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # blkart , 2015 # Dongliang Yu , 2013 # Kecheng Bi , 2014 # Tom Fifield , 2013 # 颜海峰 , 2014 # Andreas Jaeger , 2016. #zanata # howard lee , 2016. #zanata # blkart , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-06-24 04:45+0000\n" "Last-Translator: blkart \n" "Language: zh_CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "最后一个 RPC 调用中发生 %(cls)s 异常:%(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "在映像 %(i_id)s 的成员列表中找不到 %(m_id)s。" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) 正在运行..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s 似乎已在运行:%(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Could not load the " "filesystem store" msgstr "%(task_id)s(类型为 %(task_type)s)未正确配置。未能装入文件系统存储器" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s(类型为 %(task_type)s)未正确配置。缺少工作目录:%(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "正在%(verb)s %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "正在%(verb)s %(serv)s(借助 %(conf)s)" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s 请指定 host:port 对,其中 host 是 IPv4 地址、IPv6 地址、主机名或 FQDN。如" "果使用 IPv6 地址,请将其括在方括号中并与端口隔开(即,“[fe80::a:b:" "c]:9876”)。" #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s 不能包含 4 字节 Unicode 字符。" #, python-format msgid "%s is already stopped" msgstr "%s 已停止" #, python-format msgid "%s is stopped" msgstr "%s 已停止" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "当启用了 keystone 认证策略时,需要 --os_auth_url 选项或 OS_AUTH_URL 环境变" "量\n" msgid "A body is not expected with this request." msgstr "此请求不应有主体。" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,已存在名称为 %(object_name)s 的元数据定义对" "象。" #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,已存在名称为 %(property_name)s 的元数据定义" "属性。" #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "已存在名称为 %(resource_type_name)s 的元数据定义资源类型。" msgid "A set of URLs to access the image file kept in external store" msgstr "用于访问外部存储器中保留的映像文件的 URL集合" msgid "Amount of disk space (in GB) required to boot image." msgstr "引导映像所需的磁盘空间量(以 GB 计)。" msgid "Amount of ram (in MB) required to boot image." msgstr "引导映像所需的 ram 量(以 MB 计)。" msgid "An identifier for the image" msgstr "映像的标识" msgid "An identifier for the image member (tenantId)" msgstr "映像成员的标识 (tenantId)" msgid "An identifier for the owner of this task" msgstr "此任务的所有者的标识" msgid "An identifier for the task" msgstr "任务的标识" msgid "An image file url" msgstr "映像文件的 URL" msgid "An image schema url" msgstr "映像模式的 URL" msgid "An image self url" msgstr "映像本身的 URL" msgid "An import task exception occurred" msgstr "发生了导入任务异常。" msgid "An object with the same identifier already exists." msgstr "具有同一标识的对象已存在。" msgid "An object with the same identifier is currently being operated on." msgstr "当前正在对具有同一标识的对象进行操作。" msgid "An object with the specified identifier was not found." msgstr "找不到具有指定标识的对象。" msgid "An unknown exception occurred" msgstr "发生未知异常" msgid "An unknown task exception occurred" msgstr "发生未知任务异常" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "属性“%(property)s”是只读的。" #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "属性“%(property)s”已保留。" #, python-format msgid "Attribute '%s' is read-only." msgstr "属性“%s”是只读的。" #, python-format msgid "Attribute '%s' is reserved." msgstr "属性“%s”已保留。" msgid "Attribute container_format can be only replaced for a queued image." 
msgstr "只能为已排队的映像替换属性 container_format。" msgid "Attribute disk_format can be only replaced for a queued image." msgstr "只能为已排队的映像替换属性 disk_format。" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "找不到 URL %(url)s 处的授权服务。" #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "认证错误 - 文件上传期间此令牌可能已到期。正在删除 %s 的映像数据。" msgid "Authorization failed." msgstr "授权失败。" msgid "Available categories:" msgstr "可用的类别:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "无效“%s”查询过滤器格式。请使用 ISO 8601 日期时间注释。" #, python-format msgid "Bad header: %(header_name)s" msgstr "头 %(header_name)s 不正确" msgid "Body expected in request." msgstr "请求中需要主体。" msgid "Cannot be a negative value" msgstr "不能为负值" msgid "Cannot be a negative value." msgstr "不得为负值。" #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "无法将映像 %(key)s“%(value)s”转换为整数。" msgid "Cannot remove last location in the image." msgstr "不能移除映像中的最后一个位置。" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "无法为镜像%(image_id)s保存数据: %(error)s" msgid "Cannot set locations to empty list." msgstr "不能将位置设置为空列表。" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "校验和验证失败。已异常中止映像“%s”的高速缓存。" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "发生连接错误,或者对 URL %(url)s 处的授权服务的请求不正确。" #, python-format msgid "Constructed URL: %s" msgstr "已构造 URL:%s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "对于映像 %(image_id)s,映像下载已损坏" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "在尝试时间达到 30 秒之后未能绑定至 %(host)s:%(port)s" msgid "Could not find OVF file in OVA archive file." msgstr "在 OVA 归档文件中找不到 OVF 文件。" #, python-format msgid "Could not find metadata object %s" msgstr "找不到元数据对象 %s" #, python-format msgid "Could not find metadata tag %s" msgstr "找不到元数据标记 %s" #, python-format msgid "Could not find property %s" msgstr "找不到属性 %s" #, python-format msgid "Could not find task %s" msgstr "找不到任务 %s" #, python-format msgid "Could not update image: %s" msgstr "未能更新映像:%s" #, python-format msgid "Couldn't create metadata namespace: %s" msgstr "无法创建元数据命名空间:%s" #, python-format msgid "Couldn't create metadata object: %s" msgstr "无法创建元数据对象:%s" #, python-format msgid "Couldn't create metadata property: %s" msgstr "无法创建元数据属性:%s" #, python-format msgid "Couldn't create metadata tag: %s" msgstr "无法创建元数据标签:%s" #, python-format msgid "Couldn't update metadata namespace: %s" msgstr "无法更新元数据命名空间:%s" #, python-format msgid "Couldn't update metadata object: %s" msgstr "无法更新元数据对象:%s" #, python-format msgid "Couldn't update metadata property: %s" msgstr "无法更新元数据属性:%s" #, python-format msgid "Couldn't update metadata tag: %s" msgstr "无法更新元数据标签:%s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "当前包含多个磁盘的 OVA 包不受支持。" msgid "Data supplied was not valid." 
msgstr "提供的数据无效。" msgid "Date and time of image member creation" msgstr "创建映像成员的日期和时间" msgid "Date and time of image registration" msgstr "注册映像的日期和时间" msgid "Date and time of last modification of image member" msgstr "最近一次修改映像成员的日期和时间" msgid "Date and time of namespace creation" msgstr "创建名称空间的日期和时间" msgid "Date and time of object creation" msgstr "创建对象的日期和时间" msgid "Date and time of resource type association" msgstr "关联资源类型的日期和时间" msgid "Date and time of tag creation" msgstr "创建标记的日期和时间" msgid "Date and time of the last image modification" msgstr "最近一次修改映像的日期和时间" msgid "Date and time of the last namespace modification" msgstr "最近一次修改名称空间的日期和时间" msgid "Date and time of the last object modification" msgstr "最近一次修改对象的日期和时间" msgid "Date and time of the last resource type association modification" msgstr "最近一次修改资源类型关联的日期和时间" msgid "Date and time of the last tag modification" msgstr "最近一次修改标记的日期和时间" msgid "Datetime when this resource was created" msgstr "此资源的创建日期时间" msgid "Datetime when this resource was updated" msgstr "此资源的更新日期时间" msgid "Datetime when this resource would be subject to removal" msgstr "将会移除此资源的日期时间" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "正在拒绝尝试上载映像,因为它超过配额:%s" msgid "Descriptive name for the image" msgstr "映像的描述性名称" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "未能正确配置驱动程序 %(driver_name)s。原因:%(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "对请求解码时出错。Glance 无法对 URL 或请求主体包含的字符进行解码。" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "访存映像 %(image_id)s 的成员时出错:%(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "存储配置中出错。已禁止将映像添加至存储器。" #, python-format msgid "Error: %(exc_type)s: %(e)s" msgstr "错误: %(exc_type)s: %(e)s" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "成员应为以下格式:{\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "状态应为以下格式:{\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "未能找到要删除的映像 %(image_id)s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "找不到要删除的资源类型 %(resourcetype)s" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "未能初始化映像高速缓存数据库。发生错误:%s" #, python-format msgid "Failed to read %s from config" msgstr "未能从配置读取 %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "由于 HTTP 错误,未能上载映像 %(image_id)s 的映像数据:%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "由于内部错误,未能上载映像 %(image_id)s 的映像数据:%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "文件 %(path)s 具有无效支持文件 %(bfile)s,正在异常中止。" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "不允许基于文件的导入。请使用映像数据的非本地源。" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." 
msgstr "正在禁止请求,元数据定义名称空间 %s 不可视。" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "正在禁止请求,任务 %s 不可视" msgid "Format of the container" msgstr "容器的格式" msgid "Format of the disk" msgstr "磁盘格式" #, python-format msgid "Host \"%s\" is not valid." msgstr "主机“%s”无效。" #, python-format msgid "Host and port \"%s\" is not valid." msgstr "主机和端口“%s”无效。" msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "人工可读的信息性消息,仅在适当时(通常在发生故障时)才包括" msgid "If true, image will not be deletable." msgstr "如果为 true,那么映像将不可删除。" msgid "If true, namespace will not be deletable." msgstr "如果为 true,那么名称空间将不可删除。" #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "映像 %(id)s 未能删除,因为它正在使用中:%(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "镜像%(image_id)s上传后无法找到。镜像在上传过程中可能被删除: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "映像 %(image_id)s 受保护,无法删除。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "在上载之后,找不到映像 %s。可能已在上载期间删除该映像,正在清除已上载的区块。" #, python-format msgid "Image %s not found." msgstr "找不到映像 %s " #, python-format msgid "Image exceeds the storage quota: %s" msgstr "镜像超出存储限额: %s" msgid "Image id is required." msgstr "需要映像标识。" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "对于映像 %(id)s,超过映像成员限制:%(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不允许映像状态从 %(cur_status)s 转变为 %(new_status)s" #, python-format msgid "Image storage media is full: %s" msgstr "映像存储介质已满:%s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "对于映像 %(id)s,超过映像标记限制:%(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "发生映像上载问题:%s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "找不到具有所给定标识 %(image_id)s 的映像" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "授权策略不正确,期望的是“%(expected)s”,但接收到的是“%(received)s”" #, python-format msgid "Incorrect request: %s" msgstr "以下请求不正确:%s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "输入没有包含“%(key)s”字段" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "对映像存储介质的许可权不足:%s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "这个资源无效的JSON指针: '/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 配置文件中的配置无效。" msgid "Invalid configuration in property protection file." msgstr "属性保护文件中的配置无效。" #, python-format msgid "Invalid content type %(content_type)s" msgstr "内容类型 %(content_type)s 无效" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "无效过滤器值 %s。缺少右引号。" #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "无效过滤器值 %s。右引号之后没有逗号。" #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." 
msgstr "无效过滤器值 %s。左引号之前没有逗号。" #, python-format msgid "Invalid int value for age_in_days: %(age_in_days)s" msgstr "age_in_days的无效整形值:%(age_in_days)s" #, python-format msgid "Invalid int value for max_rows: %(max_rows)s" msgstr "max_rows的无效整形值:%(max_rows)s" msgid "Invalid location" msgstr "无效的位置" #, python-format msgid "Invalid location: %s" msgstr "以下位置无效:%s" msgid "Invalid locations" msgstr "无效的位置" #, python-format msgid "Invalid locations: %s" msgstr "无效的位置:%s" msgid "Invalid marker format" msgstr "标记符格式无效" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "操作“%(op)s”无效。它必须是下列其中一项:%(available)s。" msgid "Invalid position for adding a location." msgstr "用于添加位置 (location) 的位置 (position) 无效。" msgid "Invalid position for removing a location." msgstr "用于移除位置 (location) 的位置 (position) 无效。" msgid "Invalid service catalog json." msgstr "服务目录 json 无效。" #, python-format msgid "Invalid sort direction: %s" msgstr "排序方向无效:%s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "以下排序键无效:%(sort_key)s。它必须是下列其中一项:%(available)s。" #, python-format msgid "Invalid status value: %s" msgstr "状态值 %s 无效" #, python-format msgid "Invalid status: %s" msgstr "状态无效:%s" #, python-format msgid "Invalid type value: %s" msgstr "类型值 %s 无效" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "更新无效。它将导致出现重复的元数据定义名称空间,该名称空间具有同一名称 %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义对象," "该对象具有同一名称 %(name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义对象," "该对象具有同一名称 %(name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义属性," "该属性具有同一名称 %(name)s。" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "参数“%(param)s”的值“%(value)s”无效:%(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "选项 %(option)s 的以下值无效:%(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "可视性值无效:%s" #, python-format msgid "It's not allowed to add locations if image status is %s." msgstr "如果镜像状态为 %s,则不允许添加位置。" msgid "It's not allowed to add locations if locations are invisible." msgstr "不允许添加不可视的位置。" msgid "It's not allowed to remove locations if locations are invisible." msgstr "不允许移除不可视的位置。" msgid "It's not allowed to update locations if locations are invisible." msgstr "不允许更新不可视的位置。" msgid "List of strings related to the image" msgstr "与映像相关的字符串的列表" msgid "Malformed JSON in request body." msgstr "请求主体中 JSON 的格式不正确。" msgid "Maximal age is count of days since epoch." msgstr "最大年龄是自新纪元开始计算的天数。" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." 
msgstr "已超过最大重定向次数 (%(redirects)s)。" #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "对于映像 %(image_id)s,已复制成员 %(member_id)s" msgid "Member can't be empty" msgstr "成员不能为空" msgid "Member to be added not specified" msgstr "未指定要添加的成员" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "元数据定义名称空间 %(namespace)s 受保护,无法删除。" #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "对于标识 %s,找不到元数据定义名称空间" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "元数据定义对象 %(object_name)s 受保护,无法删除。" #, python-format msgid "Metadata definition object not found for id=%s" msgstr "对于标识 %s,找不到元数据定义对象" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "元数据定义属性 %(property_name)s 受保护,无法删除。" #, python-format msgid "Metadata definition property not found for id=%s" msgstr "对于标识 %s,找不到元数据定义属性" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "元数据定义资源类型 %(resource_type_name)s 是种子型系统类型,无法删除。" #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "元数据定义资源类型关联 %(resource_type)s 受保护,无法删除。" #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "元数据定义标记 %(tag_name)s 受保护,无法删除。" #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "对于标识 %s,找不到元数据定义标记" #, python-format msgid "Missing required credential: %(required)s" msgstr "缺少必需凭证:%(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "对于区域 %(region)s,存在多个“映像”服务匹配项。这通常意味着需要区域并且尚未提" "供一个区域。" #, python-format msgid "No image found with ID %s" msgstr "找不到任何具有标识 %s 的映像" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "在映像 %(img)s 中找不到标识为 %(loc)s 的位置" #, python-format msgid "Not allowed to create members for image %s." msgstr "不允许为映像 %s 创建成员。" #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "不允许取消激活状态为“%s”的映像" #, python-format msgid "Not allowed to delete members for image %s." msgstr "不允许为映像 %s 删除成员。" #, python-format msgid "Not allowed to delete tags for image %s." msgstr "不允许为映像 %s 删除标记。" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "不允许重新激活状态为“%s”的映像" #, python-format msgid "Not allowed to update members for image %s." msgstr "不允许为映像 %s 更新成员。" #, python-format msgid "Not allowed to update tags for image %s." msgstr "不允许为映像 %s 更新标记。" #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "不允许为镜像%(image_id)s上传数据:%(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "排序方向数与排序键数不匹配" msgid "OVA extract is limited to admin" msgstr "OVA 抽取操作仅限管理员执行" msgid "Old and new sorting syntax cannot be combined" msgstr "无法组合新旧排序语法" msgid "Only shared images have members." msgstr "只有已共享的镜像拥有成员." #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "操作“%s”需要名为“value”的成员。" msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." 
msgstr "操作对象必须刚好包含一个名为“add”、“remove”或“replace”的成员。" msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "操作对象必须仅包含一个名为“add”、“remove”或“replace”的成员。" msgid "Operations must be JSON objects." msgstr "操作必须是 JSON 对象。" #, python-format msgid "Original locations is not empty: %s" msgstr "原位置不为空: %s" msgid "Owner can't be updated by non admin." msgstr "非管理员无法更新所有者。" msgid "Owner of the image" msgstr "映像的所有者" msgid "Owner of the namespace." msgstr "名称空间的所有者。" msgid "Param values can't contain 4 byte unicode." msgstr "参数值不能包含 4 字节 Unicode。" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "指针“%s”包含并非可识别转义序列的一部分的“~”。" #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "指针`%s` 包含连接符\"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "指针`%s` 没有包含有效的口令" #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "指针“%s”没有以“/”开头。" #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "指针`%s` 以\"/\"结束." #, python-format msgid "Port \"%s\" is not valid." msgstr "端口“%s”无效。" #, python-format msgid "Process %d not running" msgstr "进程 %d 未在运行" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "必须在保存数据之前设置属性 %s。" #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "属性 %(property_name)s 未以需要的资源类型关联前缀“%(prefix)s”开头。" #, python-format msgid "Property %s already present." msgstr "属性 %s 已存在。" #, python-format msgid "Property %s does not exist." msgstr "属性 %s 不存在。" #, python-format msgid "Property %s may not be removed." msgstr "无法除去属性 %s。" #, python-format msgid "Property %s must be set prior to saving data." msgstr "必须在保存数据之前设置属性 %s。" msgid "Property names can't contain 4 byte unicode." msgstr "属性名称不能包含 4 字节 Unicode。" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "提供的对象与模式“%(schema)s”不匹配:%(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "不支持任务的所提供状态:%(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "不支持任务的所提供类型:%(type)s" msgid "Provides a user friendly description of the namespace." msgstr "提供名称空间的用户友好描述。" msgid "Received invalid HTTP redirect." msgstr "接收到无效 HTTP 重定向。" #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "对于授权,正在重定向至 %(uri)s。" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "API 服务器上未正确配置注册表。原因:%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "不支持重新装入 %(serv)s" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在重新装入 %(serv)s(pid 为 %(pid)s),信号为 (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" msgid "Request body must be a JSON array of operation objects." msgstr "请求主体必须是由操作对象组成的 JSON 数组。" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "来自 Keystone 的响应没有包含 Glance 端点。" msgid "Scope of image accessibility" msgstr "映像辅助功能选项的作用域" msgid "Scope of namespace accessibility." msgstr "名称空间辅助功能选项的作用域。" #, python-format msgid "Server %(serv)s is stopped" msgstr "服务器 %(serv)s 已停止" #, python-format msgid "Server worker creation failed: %(reason)s." 
msgstr "服务器工作程序创建失败:%(reason)s。" msgid "Signature verification failed" msgstr "签名认证失败" msgid "Size of image file in bytes" msgstr "映像文件的大小,以字节计" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "一些资源类型允许每个实例具有多个“键/值”对。例如,Cinder 允许卷上的用户元数据" "和映像元数据。仅映像属性元数据是通过 Nova(调度或驱动程序)求值。此属性允许名" "称空间目标除去不确定性。" msgid "Sort direction supplied was not valid." msgstr "提供的排序方向无效。" msgid "Sort key supplied was not valid." msgstr "提供的排序键无效。" msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "指定要用于给定的资源类型的前缀。当应用于指定的资源类型时,名称空间中的任何属" "性都应该使用此前缀作为前缀。必须包括前缀分隔符(例如冒号 :)。" msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "状态必须为“暂挂”、“已接受”或“已拒绝”。" msgid "Status not specified" msgstr "未指定状态" msgid "Status of the image" msgstr "映像的状态" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不允许状态从 %(cur_status)s 转变为 %(new_status)s" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在通过信号 (%(sig)s) 停止 %(serv)s (pid %(pid)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "“container_format”映像属性支持的值" msgid "Supported values for the 'disk_format' image attribute" msgstr "“disk_format”映像属性支持的值" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "已阻止重新衍生,因为 %(serv)s 为 %(rsn)s。" msgid "System SIGHUP signal received." msgstr "接收到系统 SIGHUP 信号。" #, python-format msgid "Task '%s' is required" msgstr "需要任务“%s”" msgid "Task does not exist" msgstr "任务不存在" msgid "Task failed due to Internal Error" msgstr "由于发生内部错误而导致任务失败" msgid "Task was not configured properly" msgstr "任务未正确配置" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "找不到具有给定标识 %(task_id)s 的任务" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "“changes-since”过滤器在 v2 上不再可用。" #, python-format msgid "The CA file you specified %s does not exist" msgstr "已指定的 CA 文件 %s 不存在" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "此任务 %(task_id)s 正在创建的映像 %(image_id)s 对象不再处于有效状态,无法进一" "步处理。" msgid "The Store URI was malformed." msgstr "存储器 URI 的格式不正确。" #, python-format msgid "The cert file you specified %s does not exist" msgstr "已指定的证书文件 %s 不存在" msgid "The current status of this task" msgstr "此任务的当前状态" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "存放映像高速缓存目录 %(image_cache_dir)s 的设备不支持 xattr。您可能需要编辑 " "fstab 并将 user_xattr 选项添加至存放该高速缓存目录的设备的相应行。" #, python-format msgid "" "The given uri is not valid. 
Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "给定的 URI 无效。请从受支持的 URI %(supported)s 的以下列表中指定有效 URI" #, python-format msgid "The incoming image is too large: %s" msgstr "引入的映像太大:%s" #, python-format msgid "The key file you specified %s does not exist" msgstr "已指定的密钥文件 %s 不存在" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像位置数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像成员数(对于此映像)的限制。已尝试:%(attempted)s,最大" "值:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像属性数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像标记数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "位置 %(location)s 已存在" #, python-format msgid "The location data has an invalid ID: %d" msgstr "位置数据具有无效标识:%d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "未删除名称为 %(record_name)s 的元数据定义 %(record_type)s。其他记录仍然对其进" "行引用。" #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "元数据定义名称空间 %(namespace_name)s 已存在。" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,找不到名称为 %(object_name)s 的元数据定义对" "象。" #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,找不到名称为 %(property_name)s 的元数据定义" "属性。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "已存在以下两者的元数据定义资源类型关联:资源类型 %(resource_type_name)s 与名" "称空间 %(namespace_name)s。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "找不到以下两者的元数据定义资源类型关联:资源类型 %(resource_type_name)s 与名" "称空间 %(namespace_name)s。" #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "找不到名称为 %(resource_type_name)s 的元数据定义资源类型。" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,找不到名称为 %(name)s 的元数据定义标记。" msgid "The parameters required by task, JSON blob" msgstr "任务 JSON blob 所需的参数" msgid "The provided image is too large." msgstr "提供的映像太大。" msgid "The request returned 500 Internal Server Error." msgstr "该请求返回了“500 内部服务器错误”。" msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "该请求返回了“503 服务不可用”。这通常在服务超负荷或其他瞬态停止运行时发生。" #, python-format msgid "" "The request returned a 302 Multiple Choices. 
This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "该请求返回了“302 多选项”。这通常意味着您尚未将版本指示器包括在请求 URI 中。\n" "\n" "返回了响应的主体:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "该请求返回了“413 请求实体太大”。这通常意味着已违反比率限制或配额阈值。\n" "\n" "响应主体:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "该请求返回了意外状态:%(status)s。\n" "\n" "响应主体:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "所请求映像已取消激活。已禁止下载映像数据。" msgid "The result of current task, JSON blob" msgstr "当前任务 JSON blob 的结果" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "数据大小 %(image_size)s 将超过限制。将剩余 %(remaining)s 个字节。" #, python-format msgid "The specified member %s could not be found" msgstr "找不到指定的成员 %s" #, python-format msgid "The specified metadata object %s could not be found" msgstr "找不到指定的元数据对象 %s" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "找不到指定的元数据标记 %s" #, python-format msgid "The specified namespace %s could not be found" msgstr "找不到指定的名称空间 %s" #, python-format msgid "The specified property %s could not be found" msgstr "找不到指定的属性 %s" #, python-format msgid "The specified resource type %s could not be found " msgstr "找不到指定的资源类型 %s" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "已删除映像位置的状态只能设置为“pending_delete”或“deleted”" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "已删除映像位置的状态只能设置为“pending_delete”或“deleted”。" msgid "The status of this image member" msgstr "此映像成员的状态" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "目标成员 %(member_id)s 已关联映像 %(image_id)s。" msgid "The type of task represented by this content" msgstr "此内容表示的任务的类型" msgid "The unique namespace text." msgstr "唯一名称空间文本。" msgid "The user friendly name for the namespace. Used by UI if available." msgstr "名称空间的用户友好名称。由 UI 使用(如果可用)。" msgid "There was an error configuring the client." msgstr "配置客户机时出错。" msgid "There was an error connecting to a server" msgstr "连接至服务器时出错" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "当前不允许对 Glance 任务执行此操作。到达基于 expires_at 属性的时间后,它们会" "自动删除。" msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "任务在成功或失败之后生存的时间(以小时计)" msgid "Too few arguments." msgstr "参数太少。" #, python-format msgid "" "Total size is %(size)d bytes (%(human_size)s) across %(img_count)d images" msgstr "总大小为 %(size)d 字节(%(human_size)s)(在 %(img_count)d 个映像上)" msgid "URL to access the image file kept in external store" msgstr "用于访问外部存储器中保留的映像文件的 URL" #, python-format msgid "" "Unable to create pid file %(pid)s. 
Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "无法创建 pid 文件 %(pid)s。正在以非 root 用户身份运行吗?\n" "正在回退至临时文件,可使用以下命令停止 %(service)s 服务:\n" "%(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "无法按未知运算符“%s”进行过滤。" msgid "Unable to filter on a range with a non-numeric value." msgstr "无法对具有非数字值的范围进行过滤。" msgid "Unable to filter on a unknown operator." msgstr "无法针对未知运算符进行过滤。" msgid "Unable to filter using the specified operator." msgstr "无法使用指定运算符进行过滤。" msgid "Unable to filter using the specified range." msgstr "无法使用指定的范围进行过滤。" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "在 JSON 模式更改中找不到“%s”" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "在 JSON 模式更改中找不到“op”。它必须是下列其中一项:%(available)s。" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "无法增大文件描述符限制。正在以非 root 用户身份运行吗?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "无法从配置文件 %(conf_file)s 装入 %(app_name)s。\n" "发生错误:%(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "无法装入模式:%(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "找不到 %s 的 paste 配置文件。" msgid "Unexpected body type. Expected list/dict." msgstr "意外主体类型。应该为 list/dict。" #, python-format msgid "Unexpected response: %s" msgstr "接收到意外响应:%s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "授权策略“%s”未知" #, python-format msgid "Unknown command: %s" msgstr "未知命令:%s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "排序方向未知,必须为 'desc' 或 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "无法识别 JSON 模式草稿版本" msgid "Virtual size of image in bytes" msgstr "映像的虚拟大小,以字节计" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "用来等待 pid %(pid)s (%(file)s) 终止的时间已达到 15 秒;正在放弃" msgid "You are not authenticated." msgstr "您未经认证。" msgid "You are not authorized to complete this action." msgstr "您无权完成此操作。" #, python-format msgid "You are not authorized to lookup image %s." msgstr "未授权您查询映像 %s。" #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "未授权您查询映像 %s 的成员。" msgid "You are not permitted to create image members for the image." msgstr "不允许为映像创建映像成员。" #, python-format msgid "You are not permitted to create images owned by '%s'." 
msgstr "不允许创建由“%s”拥有的映像。" msgid "You do not own this image" msgstr "您未拥有此映像" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "已选择在连接中使用 SSL,并且已提供证书,但是未能提供 key_file 参数或设置 " "GLANCE_CLIENT_KEY_FILE 环境变量" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "已选择在连接中使用 SSL,并且已提供密钥,但是未能提供 cert_file 参数或设置 " "GLANCE_CLIENT_CERT_FILE 环境变量" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() 已获取意外的关键字自变量“%s”" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "在更新中,无法从 %(current)s 转变为 %(next)s(需要 from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "定制属性 (%(props)s) 与基本基准冲突" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "在此平台上,eventlet“poll”和“selects”主数据中心都不可用" msgid "limit param must be an integer" msgstr "limit 参数必须为整数" msgid "limit param must be positive" msgstr "limit 参数必须为正数" msgid "md5 hash of image contents." msgstr "映像内容的 md5 散列。" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() 已获取意外的关键字 %s" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "无法启动 %(serv)s。发生错误:%(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id 太长,最大大小为 %s" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.782294 glance-29.0.0/glance/locale/zh_TW/0000775000175000017500000000000000000000000016601 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8783054 glance-29.0.0/glance/locale/zh_TW/LC_MESSAGES/0000775000175000017500000000000000000000000020366 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/locale/zh_TW/LC_MESSAGES/glance.po0000664000175000017500000013544400000000000022172 0ustar00zuulzuul00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2024-08-29 21:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 05:23+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "前一個 RPC 呼叫已發出 %(cls)s 異常狀況:%(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "在映像檔 %(i_id)s 的成員清單中找不到 %(m_id)s。" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) 正在執行中..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s 似乎已在執行中:%(pid)s" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "未適當地配置 %(task_id)s(類型為 %(task_type)s)。無法載入檔案系統儲存庫" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "未適當地配置 %(task_id)s(類型為 %(task_type)s)。遺漏工作目錄:%(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "正在對 %(serv)s 執行 %(verb)s 作業" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "透過 %(conf)s,正在對 %(serv)s 執行 %(verb)s 作業" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s 請指定 host:port 組,其中 host 是 IPv4 位址、IPv6 位址、主機名稱或 FQDN。" "如果使用 IPv6 位址,請將其單獨括在方括弧內,以與埠區別開(例如 \"[fe80::a:b:" "c]:9876\")。" #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s 不能包含 4 位元組 Unicode 字元。" #, python-format msgid "%s is already stopped" msgstr "已停止 %s" #, python-format msgid "%s is stopped" msgstr "%s 已停止" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "--os_auth_url 選項或 OS_AUTH_URL 環境變數(啟用 Keystone 鑑別策略時需要)\n" msgid "A body is not expected with this request." msgstr "此要求預期不含內文。" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "名稱為 %(object_name)s 的 meta 資料定義物件已經存在於名稱空間 " "%(namespace_name)s 中。" #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "名稱為 %(property_name)s 的 meta 資料定義內容已經存在於名稱空間 " "%(namespace_name)s 中。" #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "名稱為 %(resource_type_name)s 的 meta 資料定義資源類型已存在。" msgid "A set of URLs to access the image file kept in external store" msgstr "用來存取外部儲存庫中所保留映像檔的 URL 集" msgid "Amount of disk space (in GB) required to boot image." msgstr "啟動映像檔所需的磁碟空間數量(以 GB 為單位)。" msgid "Amount of ram (in MB) required to boot image." 
msgstr "啟動映像檔所需的 RAM 數量(以 MB 為單位)。" msgid "An identifier for the image" msgstr "映像檔的 ID" msgid "An identifier for the image member (tenantId)" msgstr "映像檔成員的 ID (tenantId)" msgid "An identifier for the owner of this task" msgstr "此作業的擁有者 ID" msgid "An identifier for the task" msgstr "作業的 ID" msgid "An image file url" msgstr "映像檔 URL" msgid "An image schema url" msgstr "映像檔綱目 URL" msgid "An image self url" msgstr "映像檔自身 URL" msgid "An import task exception occurred" msgstr "發生匯入作業異常狀況" msgid "An object with the same identifier already exists." msgstr "已存在具有相同 ID 的物件。" msgid "An object with the same identifier is currently being operated on." msgstr "目前正在對具有相同 ID 的物件執行作業。" msgid "An object with the specified identifier was not found." msgstr "找不到具有所指定 ID 的物件。" msgid "An unknown exception occurred" msgstr "發生不明異常狀況" msgid "An unknown task exception occurred" msgstr "發生不明的作業異常狀況" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "屬性 '%(property)s' 是唯讀的。" #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "屬性 '%(property)s' 已保留。" #, python-format msgid "Attribute '%s' is read-only." msgstr "屬性 '%s' 是唯讀的。" #, python-format msgid "Attribute '%s' is reserved." msgstr "屬性 '%s' 已保留。" msgid "Attribute container_format can be only replaced for a queued image." msgstr "僅已排入佇列的映像檔可以取代屬性 container_format。" msgid "Attribute disk_format can be only replaced for a queued image." msgstr "僅已排入佇列的映像檔可以取代屬性 disk_format。" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "在 URL %(url)s 處找不到鑑別服務。" #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "鑑別錯誤 - 在檔案上傳期間,記號可能已過期。正在刪除 %s 的映像檔資料。" msgid "Authorization failed." msgstr "授權失敗。" msgid "Available categories:" msgstr "可用的種類:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "\"%s\" 查詢過濾器格式錯誤。請使用 ISO 8601 日期時間表示法。" #, python-format msgid "Bad header: %(header_name)s" msgstr "錯誤的標頭:%(header_name)s" msgid "Body expected in request." msgstr "要求中需要內文。" msgid "Cannot be a negative value" msgstr "不能是負數值" msgid "Cannot be a negative value." msgstr "不能是負數值。" #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "無法將映像檔 %(key)s '%(value)s' 轉換為整數。" msgid "Cannot remove last location in the image." msgstr "無法移除映像檔中的最後位置。" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "無法儲存映像檔 %(image_id)s 的資料:%(error)s" msgid "Cannot set locations to empty list." msgstr "無法將位置設為空白清單。" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "總和檢查驗證失敗。已中止快取映像檔 '%s'。" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "將錯誤/不當的要求連接至 URL %(url)s 處的鑑別服務。" #, python-format msgid "Constructed URL: %s" msgstr "已建構 URL:%s" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "映像檔 %(image_id)s 的映像檔下載已毀損" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "嘗試 30 秒鐘後仍無法連結至 %(host)s:%(port)s" msgid "Could not find OVF file in OVA archive file." 
msgstr "在 OVA 保存檔中找不到 OVF 檔。" #, python-format msgid "Could not find metadata object %s" msgstr "找不到 meta 資料物件 %s" #, python-format msgid "Could not find metadata tag %s" msgstr "找不到 meta 資料標籤 %s" #, python-format msgid "Could not find property %s" msgstr "找不到內容 %s" #, python-format msgid "Could not find task %s" msgstr "找不到作業 %s" #, python-format msgid "Could not update image: %s" msgstr "無法更新映像檔:%s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "目前,不支援包含多個磁碟的 OVA 套件。" msgid "Data supplied was not valid." msgstr "提供的資料無效。" msgid "Date and time of image member creation" msgstr "映像檔成員的建立日期和時間" msgid "Date and time of image registration" msgstr "映像檔登錄的日期和時間" msgid "Date and time of last modification of image member" msgstr "映像檔成員的前次修改日期和時間" msgid "Date and time of namespace creation" msgstr "名稱空間的建立日期和時間" msgid "Date and time of object creation" msgstr "物件的建立日期和時間" msgid "Date and time of resource type association" msgstr "資源類型關聯的日期和時間" msgid "Date and time of tag creation" msgstr "標記的建立日期和時間" msgid "Date and time of the last image modification" msgstr "映像檔的前次修改日期和時間" msgid "Date and time of the last namespace modification" msgstr "名稱空間的前次修改日期和時間" msgid "Date and time of the last object modification" msgstr "物件的前次修改日期和時間" msgid "Date and time of the last resource type association modification" msgstr "資源類型關聯的前次修改日期和時間" msgid "Date and time of the last tag modification" msgstr "標記的前次修改日期和時間" msgid "Datetime when this resource was created" msgstr "此資源的建立日期時間" msgid "Datetime when this resource was updated" msgstr "此資源的更新日期時間" msgid "Datetime when this resource would be subject to removal" msgstr "可能會移除此資源的日期時間" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "正在拒絕嘗試上傳映像檔,因為它已超出配額:%s" msgid "Descriptive name for the image" msgstr "映像檔的敘述性名稱" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "無法正確地配置驅動程式 %(driver_name)s。原因:%(reason)s" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "將您的要求進行解碼時發生錯誤。URL 或要求內文包含無法由 Glance 進行解碼的字元" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "提取映像檔 %(image_id)s 的成員時發生錯誤:%(inner_msg)s" msgid "Error in store configuration. Adding images to store is disabled." msgstr "儲存庫配置發生錯誤。已停用新增映像檔至儲存庫。" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "預期成員的格式為:{\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "預期狀態的格式為:{\"status\": \"status\"}" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "找不到要刪除的映像檔 %(image_id)s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "找不到要刪除的資源類型 %(resourcetype)s" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "無法起始設定映像檔快取資料庫。發生錯誤:%s" #, python-format msgid "Failed to read %s from config" msgstr "無法從配置中讀取 %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "由於 HTTP 錯誤而無法上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "由於內部錯誤而無法上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." 
msgstr "檔案 %(path)s 具有無效的支援檔案 %(bfile)s,正在中斷。" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "不容許檔案型匯入。請使用映像檔資料的非本端來源。" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "正在禁止要求,meta 資料定義名稱空間 %s 不可見。" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "正在禁止要求,作業 %s 不可見" msgid "Format of the container" msgstr "儲存器的格式" msgid "Format of the disk" msgstr "磁碟的格式" #, python-format msgid "Host \"%s\" is not valid." msgstr "主機 \"%s\" 無效。" #, python-format msgid "Host and port \"%s\" is not valid." msgstr "主機和埠 \"%s\" 無效。" msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "適當的時候(通常是失敗時)僅併入人類可讀的參考訊息" msgid "If true, image will not be deletable." msgstr "如果為 true,則映像檔不可刪除。" msgid "If true, namespace will not be deletable." msgstr "如果為 True,則名稱空間將不可刪除。" #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "無法刪除映像檔 %(id)s,因為它在使用中:%(exc)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "上傳之後找不到映像檔 %(image_id)s。可能已在上傳期間刪除該映像檔:%(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "映像檔 %(image_id)s 已受保護,無法刪除。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "上傳之後找不到映像檔 %s。可能已在上傳期間刪除該映像檔,正在清除已上傳的區塊。" #, python-format msgid "Image %s not found." msgstr "找不到映像檔 %s。" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "映像檔超出儲存體配額:%s" msgid "Image id is required." msgstr "映像檔 ID 是必要的。" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "已超出映像檔 %(id)s 的映像檔成員限制:%(e)s:" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不容許映像檔狀態從 %(cur_status)s 轉移至 %(new_status)s" #, python-format msgid "Image storage media is full: %s" msgstr "映像檔儲存媒體已滿:%s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "已超出映像檔 %(id)s 的映像檔標籤限制:%(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "映像檔上傳問題:%s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "找不到具有給定 ID %(image_id)s 的映像檔" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "不正確的鑑別策略,需要 \"%(expected)s\",但收到 \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "不正確的要求:%s" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "輸入不包含 '%(key)s' 欄位" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "對映像檔儲存媒體的許可權不足:%s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "此資源的 JSON 指標無效:'/%s'" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 配置檔中的配置無效。" msgid "Invalid configuration in property protection file." msgstr "內容保護檔案中的配置無效。" #, python-format msgid "Invalid content type %(content_type)s" msgstr "無效的內容類型 %(content_type)s" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "無效的過濾器值 %s。遺漏右引號。" #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "無效的過濾器值 %s。右引號後面沒有逗點。" #, python-format msgid "" "Invalid filter value %s. 
There is no comma before opening quotation mark." msgstr "無效的過濾器值 %s。左引號前面沒有逗點。" msgid "Invalid location" msgstr "無效的位置" #, python-format msgid "Invalid location: %s" msgstr "無效的位置:%s" msgid "Invalid locations" msgstr "無效的位置" #, python-format msgid "Invalid locations: %s" msgstr "無效的位置:%s" msgid "Invalid marker format" msgstr "無效的標記格式" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "無效作業:`%(op)s`。它必須是下列其中一項:%(available)s。" msgid "Invalid position for adding a location." msgstr "用於新增位置的位置無效。" msgid "Invalid position for removing a location." msgstr "用於移除位置的位置無效。" msgid "Invalid service catalog json." msgstr "無效的服務型錄 JSON。" #, python-format msgid "Invalid sort direction: %s" msgstr "無效的排序方向:%s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "排序鍵 %(sort_key)s 無效。它必須為下列其中一項:%(available)s。" #, python-format msgid "Invalid status value: %s" msgstr "無效的狀態值:%s" #, python-format msgid "Invalid status: %s" msgstr "無效的狀態:%s" #, python-format msgid "Invalid type value: %s" msgstr "無效的類型值:%s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "更新無效。它會導致產生具有相同名稱 %s 的重複 meta 資料定義名稱空間。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無效的更新。此更新將導致下列名稱空間中存在具有相同名稱%(name)s 的重複 meta 資" "料定義物件:%(namespace_name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新無效。它會導致在下列名稱空間中產生具有相同名稱 %(name)s 的重複 meta 資料" "定義內容:%(namespace_name)s。" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "參數 '%(param)s' 的值 '%(value)s' 無效:%(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "選項 %(option)s 的值 %(value)s 無效" #, python-format msgid "Invalid visibility value: %s" msgstr "無效的可見性值:%s" msgid "It's not allowed to add locations if locations are invisible." msgstr "如果位置是隱藏的,則不容許新增位置。" msgid "It's not allowed to remove locations if locations are invisible." msgstr "如果位置是隱藏的,則不容許移除位置。" msgid "It's not allowed to update locations if locations are invisible." msgstr "如果位置是隱藏的,則不容許更新位置。" msgid "List of strings related to the image" msgstr "與映像檔相關的字串清單" msgid "Malformed JSON in request body." msgstr "要求內文中 JSON 的格式不正確。" msgid "Maximal age is count of days since epoch." msgstr "經歷時間上限是自新紀元以來的天數。" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "已超出重新導向數目上限(%(redirects)s 個)。" #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "針對映像檔 %(image_id)s,成員 %(member_id)s 重複" msgid "Member can't be empty" msgstr "成員不能是空的" msgid "Member to be added not specified" msgstr "未指定要新增的成員" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted."
msgstr "Meta 資料定義名稱空間 %(namespace)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義名稱空間" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "Meta 資料定義物件 %(object_name)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition object not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義物件" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "Meta 資料定義內容 %(property_name)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition property not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義內容" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Meta 資料定義資源類型 %(resource_type_name)s 是種子系統類型,無法將其刪除。" #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "Meta 資料定義資源類型關聯 %(resource_type)s 已受保護,無法將其刪除。" #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "meta 資料定義標籤 %(tag_name)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義標籤" #, python-format msgid "Missing required credential: %(required)s" msgstr "遺漏了必要認證:%(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "區域 %(region)s 有多個「映像檔」服務相符項。這通常表示需要一個區域,但您尚未" "提供。" #, python-format msgid "No image found with ID %s" msgstr "找不到 ID 為 %s 的映像檔" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "從映像檔 %(img)s 中找不到 ID 為 %(loc)s 的位置" #, python-format msgid "Not allowed to create members for image %s." msgstr "不容許建立映像檔 %s 的成員。" #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "不容許取消啟動處於狀態 '%s' 的映像檔" #, python-format msgid "Not allowed to delete members for image %s." msgstr "不容許刪除映像檔 %s 的成員。" #, python-format msgid "Not allowed to delete tags for image %s." msgstr "不容許刪除映像檔 %s 的標籤。" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "不容許重新啟動處於狀態 '%s' 的映像檔" #, python-format msgid "Not allowed to update members for image %s." msgstr "不容許更新映像檔 %s 的成員。" #, python-format msgid "Not allowed to update tags for image %s." msgstr "不容許更新映像檔 %s 的標籤。" #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "不容許上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" msgid "Number of sort dirs does not match the number of sort keys" msgstr "排序方向數目與排序鍵數目不符" msgid "OVA extract is limited to admin" msgstr "OVA 擷取已限制為管理者" msgid "Old and new sorting syntax cannot be combined" msgstr "無法結合新舊排序語法" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "作業 \"%s\" 需要名稱為 \"value\" 的成員。" msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "作業物件必須正好包含一個名稱為 \"add\"、\"remove\" 或 \"replace\" 的成員。" msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "作業物件只能包含一個名稱為 \"add\"、\"remove\" 或 \"replace\" 的成員。" msgid "Operations must be JSON objects." msgstr "作業必須是 JSON 物件。" #, python-format msgid "Original locations is not empty: %s" msgstr "原始位置不是空的:%s" msgid "Owner can't be updated by non admin." 
msgstr "擁有者無法由非管理者進行更新。" msgid "Owner of the image" msgstr "映像檔的擁有者" msgid "Owner of the namespace." msgstr "名稱空間的擁有者。" msgid "Param values can't contain 4 byte unicode." msgstr "參數值不能包含 4 位元組 Unicode。" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "指標 `%s` 包含不屬於可辨識 ESC 序列的 \"~\"。" #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "指標 `%s` 包含相鄰的 \"/\"。" #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "指標 `%s` 不包含有效的記號。" #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "指標 `%s` 的開頭不是 \"/\"。" #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "指標 `%s` 的結尾是 \"/\"。" #, python-format msgid "Port \"%s\" is not valid." msgstr "埠 \"%s\" 無效。" #, python-format msgid "Process %d not running" msgstr "程序 %d 不在執行中" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "儲存資料之前必須設定內容 %s。" #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "內容 %(property_name)s 的開頭不是預期的資源類型關聯字首 '%(prefix)s'。" #, python-format msgid "Property %s already present." msgstr "內容 %s 已存在。" #, python-format msgid "Property %s does not exist." msgstr "內容 %s 不存在。" #, python-format msgid "Property %s may not be removed." msgstr "可能無法移除內容 %s。" #, python-format msgid "Property %s must be set prior to saving data." msgstr "儲存資料之前必須設定內容 %s。" msgid "Property names can't contain 4 byte unicode." msgstr "內容名稱不能包含 4 位元組 Unicode。" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "所提供的物件與綱目 '%(schema)s' 不符:%(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "提供的作業狀態 %(status)s 不受支援" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "提供的作業類型 %(type)s 不受支援" msgid "Provides a user friendly description of the namespace." msgstr "提供對使用者更為友善的名稱空間說明。" msgid "Received invalid HTTP redirect." msgstr "收到無效的 HTTP 重新導向。" #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "正在重新導向至 %(uri)s 以進行授權。" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "API 伺服器上未正確地配置登錄。原因:%(reason)s" #, python-format msgid "Reload of %(serv)s not supported" msgstr "不支援重新載入 %(serv)s" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在使用信號 (%(sig)s) 來重新載入 %(serv)s (pid %(pid)s)" #, python-format msgid "Removing stale pid file %s" msgstr "正在移除過時 PID 檔案 %s" msgid "Request body must be a JSON array of operation objects." msgstr "要求內文必須是作業物件的 JSON 陣列。" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone 的回應不包含 Glance 端點。" msgid "Scope of image accessibility" msgstr "映像檔的可存取性範圍" msgid "Scope of namespace accessibility." msgstr "名稱空間的可存取性範圍。" #, python-format msgid "Server %(serv)s is stopped" msgstr "伺服器 %(serv)s 已停止" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "建立伺服器工作程式失敗:%(reason)s。" msgid "Signature verification failed" msgstr "簽章驗證失敗" msgid "Size of image file in bytes" msgstr "映像檔的大小(以位元組為單位)" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." 
msgstr "" "部分資源類型容許每個實例具有多個鍵值組。例如,Cinder 容許使用者及映像檔 meta " "資料存在於多個磁區上。Nova 只評估映像檔內容 meta 資料(正在排程或驅動程式)。" "此內容容許名稱空間目標消除此語義不明確情況。" msgid "Sort direction supplied was not valid." msgstr "提供的排序方向無效。" msgid "Sort key supplied was not valid." msgstr "提供的排序鍵無效。" msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "指定要用於給定資源類型的字首。將名稱空間內的任何內容套用至指定的資源類型時," "都應該為該內容新增此字首。必須包括字首分隔字元(例如,冒號 :)。" msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "狀態必須是 \"pending\"、\"accepted\" 或 \"rejected\"。" msgid "Status not specified" msgstr "未指定狀態" msgid "Status of the image" msgstr "映像檔的狀態" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不容許狀態從 %(cur_status)s 轉移至 %(new_status)s" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在使用信號 (%(sig)s) 來停止 %(serv)s (pid %(pid)s)" msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' 映像檔屬性的支援值" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' 映像檔屬性的支援值" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "已暫停重新大量產生,因為 %(serv)s 是 %(rsn)s。" msgid "System SIGHUP signal received." msgstr "接收到系統 SIGHUP 信號。" #, python-format msgid "Task '%s' is required" msgstr "需要作業 '%s'" msgid "Task does not exist" msgstr "作業不存在" msgid "Task failed due to Internal Error" msgstr "由於內部錯誤,作業失敗" msgid "Task was not configured properly" msgstr "作業未適當地配置" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "找不到具有給定 ID %(task_id)s 的作業" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "在第 2 版上,已無法再使用 \"changes-since\" 過濾器。" #, python-format msgid "The CA file you specified %s does not exist" msgstr "指定的 CA 檔 %s 不存在" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "此作業 %(task_id)s 所建立的映像檔 %(image_id)s 物件不再處於有效狀態,無法進一" "步處理。" msgid "The Store URI was malformed." msgstr "儲存庫 URI 的格式不正確。" #, python-format msgid "The cert file you specified %s does not exist" msgstr "指定的憑證檔 %s 不存在" msgid "The current status of this task" msgstr "此作業的現行狀態" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "存放映像檔快取目錄 %(image_cache_dir)s 的裝置不支援 xattr。您可能需要編輯 " "fstab 並將 user_xattr 選項新增至存放快取目錄之裝置的適當行。" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "給定的 URI 無效。請從下列受支援的 URI %(supported)s 清單中指定有效的 URI" #, python-format msgid "The incoming image is too large: %s" msgstr "送入的映像檔太大:%s" #, python-format msgid "The key file you specified %s does not exist" msgstr "指定的金鑰檔 %s 不存在" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "容許的映像檔位置數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. 
Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "此映像檔容許的映像檔成員數目已超出此限制。已嘗試:%(attempted)s,上限:" "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "容許的映像檔內容數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "容許的映像檔標籤數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "位置 %(location)s 已存在" #, python-format msgid "The location data has an invalid ID: %d" msgstr "位置資料的 ID 無效:%d" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "未刪除名稱為 %(record_name)s 的 meta 資料定義 %(record_type)s。其他記錄仍參照" "此 meta 資料定義。" #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "Meta 資料定義名稱空間 %(namespace_name)s 已經存在。" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "在下列名稱空間中,找不到名稱為 %(object_name)s 的 meta 資料定義物件:" "%(namespace_name)s。" #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "在下列名稱空間中,找不到名稱為 %(property_name)s 的 meta 資料定義內容:" "%(namespace_name)s。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "資源類型 %(resource_type_name)s 與名稱空間 %(namespace_name)s 的meta 資料定義" "資源類型關聯已存在。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "找不到資源類型 %(resource_type_name)s 與名稱空間 %(namespace_name)s 的meta 資" "料定義資源類型關聯。" #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "找不到名稱為 %(resource_type_name)s 的 meta 資料定義資源類型。" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "在下列名稱空間中,找不到名稱為 %(name)s 的 meta 資料定義標籤:" "%(namespace_name)s。" msgid "The parameters required by task, JSON blob" msgstr "作業所需的參數:JSON 二進位大型物件" msgid "The provided image is too large." msgstr "所提供的映像檔太大。" msgid "The request returned 500 Internal Server Error." msgstr "要求傳回了「500 內部伺服器錯誤」。" msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "要求傳回了「503 無法使用服務」。通常,在服務超載或其他暫時性服務中斷時發生。" #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "要求傳回了「302 多重選擇」。這通常表示要求 URI 中尚不包含版本指示符。\n" "\n" "傳回了回應內文:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. 
This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求傳回了「413 要求實體太大」。這通常表示已違反評比限制或配額臨界值。\n" "\n" "回應內文:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求傳回了非預期的狀態:%(status)s。\n" "\n" "回應內文:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "已取消啟動所要求的映像檔。已禁止下載映像檔資料。" msgid "The result of current task, JSON blob" msgstr "現行作業的結果:JSON 二進位大型物件" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "資料的大小 %(image_size)s 將超出該限制。剩餘 %(remaining)s 個位元組。" #, python-format msgid "The specified member %s could not be found" msgstr "找不到指定的成員 %s" #, python-format msgid "The specified metadata object %s could not be found" msgstr "找不到指定的 meta 資料物件 %s" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "找不到指定的 meta 資料標籤 %s" #, python-format msgid "The specified namespace %s could not be found" msgstr "找不到指定的名稱空間 %s" #, python-format msgid "The specified property %s could not be found" msgstr "找不到指定的內容 %s" #, python-format msgid "The specified resource type %s could not be found " msgstr "找不到指定的資源類型 %s" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "只能將已刪除映像檔位置的狀態設為 'pending_delete' 或'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "只能將已刪除映像檔位置的狀態設為 'pending_delete' 或'deleted'。" msgid "The status of this image member" msgstr "此映像檔成員的狀態" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "目標成員 %(member_id)s 已經與映像檔%(image_id)s 相關聯。" msgid "The type of task represented by this content" msgstr "此內容所表示的作業類型" msgid "The unique namespace text." msgstr "唯一的名稱空間文字。" msgid "The user friendly name for the namespace. Used by UI if available." msgstr "對使用者更為友善的名稱空間名稱。如果有的話,則由使用者介面使用。" msgid "There was an error configuring the client." msgstr "配置用戶端時發生錯誤。" msgid "There was an error connecting to a server" msgstr "連接至伺服器時發生錯誤" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "目前不允許對 Glance 作業執行這項作業。根據它們的 expires_at內容,將在達到時間" "之後自動刪除它們。" msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "作業在成功或失敗後存活的時間(小時)" msgid "Too few arguments." msgstr "引數太少。" msgid "URL to access the image file kept in external store" msgstr "用來存取外部儲存庫中所保留之映像檔的 URL" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "無法建立 PID 檔案 %(pid)s。要以非 root 使用者身分執行嗎?\n" "正在撤回而使用暫存檔,您可以使用下列指令來停止 %(service)s 服務:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "無法依不明運算子 '%s' 進行過濾。" msgid "Unable to filter on a range with a non-numeric value." msgstr "無法對包含非數值的範圍進行過濾。" msgid "Unable to filter on a unknown operator." msgstr "無法依不明運算子進行過濾。" msgid "Unable to filter using the specified operator." msgstr "無法使用指定的運算子進行過濾。" msgid "Unable to filter using the specified range." 
msgstr "無法使用指定的範圍進行過濾。" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "在「JSON 綱目」變更中找不到 '%s'" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "在 JSON 綱目變更中找不到 `op`。它必須是下列其中一項:%(available)s。" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "無法增加檔案描述子限制。要以非 root 使用者身分執行嗎?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "無法從配置檔 %(conf_file)s 載入 %(app_name)s。\n" "發生錯誤:%(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "無法載入綱目:%(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "找不到 %s 的 paste 配置檔。" msgid "Unexpected body type. Expected list/dict." msgstr "非預期的內文類型。預期為清單/字典。" #, python-format msgid "Unexpected response: %s" msgstr "非預期的回應:%s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "不明的鑑別策略 '%s'" #, python-format msgid "Unknown command: %s" msgstr "不明指令:%s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "不明的排序方向,必須為 'desc' 或 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "無法辨識的「JSON 綱目」草稿版本" msgid "Virtual size of image in bytes" msgstr "映像檔的虛擬大小(以位元組為單位)" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "等待 PID %(pid)s (%(file)s) 當掉已達到 15 秒;正在放棄" msgid "You are not authenticated." msgstr "您沒有進行鑑別。" msgid "You are not authorized to complete this action." msgstr "您未獲授權來完成此動作。" #, python-format msgid "You are not authorized to lookup image %s." msgstr "您未獲授權來查閱映像檔 %s。" #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "您未獲授權來查閱映像檔 %s 的成員。" msgid "You are not permitted to create image members for the image." msgstr "不允許您給映像檔建立映像檔成員。" #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "不允許您建立擁有者為 '%s' 的映像檔。" msgid "You do not own this image" msgstr "您不是此映像檔的擁有者" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "您已選取在連接時使用 SSL,並且提供了憑證,但未提供 key_file 參數,也沒有設定 " "GLANCE_CLIENT_KEY_FILE 環境變數" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "您已選取在連接時使用 SSL,並且提供了金鑰,但未提供 cert_file 參數,也沒有設" "定 GLANCE_CLIENT_CERT_FILE 環境變數" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() 取得非預期的關鍵字引數 '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "更新時無法從 %(current)s 轉移至 %(next)s(需要 from_state = %(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "自訂內容 (%(props)s) 與基本內容相衝突" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "此平台上無法使用 eventlet 'poll' 及 'selects' 中心。" msgid "limit param must be an integer" msgstr "限制參數必須是整數" msgid "limit param must be positive" msgstr "限制參數必須是正數" msgid "md5 hash of image contents." 
msgstr "映像檔內容的 md5 雜湊值。" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() 取得非預期的關鍵字 %s" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "無法啟動 %(serv)s。取得錯誤:%(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id 太長,大小上限為 %s" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/location.py0000664000175000017500000006472400000000000016506 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import abc import copy import functools from cryptography import exceptions as crypto_exception from cursive import exception as cursive_exception from cursive import signature_utils import glance_store as store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from glance.common import exception from glance.common import format_inspector from glance.common import store_utils from glance.common import utils import glance.domain.proxy from glance.i18n import _, _LE, _LI, _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) class ImageRepoProxy(glance.domain.proxy.Repo): def __init__(self, image_repo, context, store_api, store_utils): self.context = context self.store_api = store_api self.image_repo = image_repo proxy_kwargs = {'context': context, 'store_api': store_api, 'store_utils': store_utils} super(ImageRepoProxy, self).__init__(image_repo, item_proxy_class=ImageProxy, item_proxy_kwargs=proxy_kwargs) self.db_api = glance.db.get_api() def _set_acls(self, image): public = image.visibility in ['public', 'community'] member_ids = [] if image.locations and not public: member_repo = _get_member_repo_for_store(image, self.context, self.db_api, self.store_api) member_ids = [m.member_id for m in member_repo.list()] for location in image.locations: if CONF.enabled_backends: # NOTE(whoami-rajat): Do not set_acls if store is not defined # on this node. This is possible in case of edge deployment # that image location is present but the actual store is # not related to this node. 
image_store = location['metadata'].get('store') if image_store not in CONF.enabled_backends: msg = (_("Store %s is not available on " "this node, skipping `_set_acls` " "call.") % image_store) LOG.debug(msg) continue self.store_api.set_acls_for_multi_store( location['url'], image_store, public=public, read_tenants=member_ids, context=self.context ) else: self.store_api.set_acls(location['url'], public=public, read_tenants=member_ids, context=self.context) def add(self, image): result = super(ImageRepoProxy, self).add(image) self._set_acls(image) return result def save(self, image, from_state=None): result = super(ImageRepoProxy, self).save(image, from_state=from_state) self._set_acls(image) return result def get(self, image_id): image = super(ImageRepoProxy, self).get(image_id) if CONF.enabled_backends: try: store_utils.update_store_in_locations( self.context, image, self.image_repo) except exception.Forbidden: # NOTE(danms): We may not be able to complete a store # update if we do not own the image. That should not # break us, so avoid raising Forbidden in that # case. Note that modifications to @image here will # still be returned to the user, just not saved in the # DB. That is probably what we want anyway. pass return image def _get_member_repo_for_store(image, context, db_api, store_api): image_member_repo = glance.db.ImageMemberRepo(context, db_api, image) store_image_repo = glance.location.ImageMemberRepoProxy( image_member_repo, image, context, store_api) return store_image_repo def _check_location_uri(context, store_api, store_utils, uri, backend=None): """Check if an image location is valid. :param context: Glance request context :param store_api: store API module :param store_utils: store utils module :param uri: location's uri string :param backend: A backend name for the store """ try: # NOTE(zhiyan): Some stores return zero when it catch exception if CONF.enabled_backends: size_from_backend = store_api.get_size_from_uri_and_backend( uri, backend, context=context) else: size_from_backend = store_api.get_size_from_backend( uri, context=context) is_ok = (store_utils.validate_external_location(uri) and size_from_backend > 0) except (store.UnknownScheme, store.NotFound, store.BadStoreUri): is_ok = False if not is_ok: reason = _('Invalid location') raise exception.BadStoreUri(message=reason) def _check_image_location(context, store_api, store_utils, location): backend = None if CONF.enabled_backends: backend = location['metadata'].get('store') _check_location_uri(context, store_api, store_utils, location['url'], backend=backend) store_api.check_location_metadata(location['metadata']) def _set_image_size(context, image, locations): if not image.size: for location in locations: if CONF.enabled_backends: size_from_backend = store.get_size_from_uri_and_backend( location['url'], location['metadata'].get('store'), context=context) else: size_from_backend = store.get_size_from_backend( location['url'], context=context) if size_from_backend: # NOTE(flwang): This assumes all locations have the same size image.size = size_from_backend break def _count_duplicated_locations(locations, new): """ To calculate the count of duplicated locations for new one. 
:param locations: The exiting image location set :param new: The new image location :returns: The count of duplicated locations """ ret = 0 for loc in locations: if loc['url'] == new['url'] and loc['metadata'] == new['metadata']: ret += 1 return ret class ImageFactoryProxy(glance.domain.proxy.ImageFactory): def __init__(self, factory, context, store_api, store_utils): self.context = context self.store_api = store_api self.store_utils = store_utils proxy_kwargs = {'context': context, 'store_api': store_api, 'store_utils': store_utils} super(ImageFactoryProxy, self).__init__(factory, proxy_class=ImageProxy, proxy_kwargs=proxy_kwargs) def new_image(self, **kwargs): locations = kwargs.get('locations', []) for loc in locations: _check_image_location(self.context, self.store_api, self.store_utils, loc) loc['status'] = 'active' if _count_duplicated_locations(locations, loc) > 1: raise exception.DuplicateLocation(location=loc['url']) return super(ImageFactoryProxy, self).new_image(**kwargs) @functools.total_ordering class StoreLocations(abc.MutableSequence): """ The proxy for store location property. It takes responsibility for:: 1. Location uri correctness checking when adding a new location. 2. Remove the image data from the store when a location is removed from an image. """ def __init__(self, image_proxy, value): self.image_proxy = image_proxy if isinstance(value, list): self.value = value else: self.value = list(value) def append(self, location): # NOTE(flaper87): Insert this # location at the very end of # the value list. self.insert(len(self.value), location) def extend(self, other): if isinstance(other, StoreLocations): locations = other.value else: locations = list(other) for location in locations: self.append(location) def insert(self, i, location): _check_image_location(self.image_proxy.context, self.image_proxy.store_api, self.image_proxy.store_utils, location) location['status'] = 'active' if _count_duplicated_locations(self.value, location) > 0: raise exception.DuplicateLocation(location=location['url']) self.value.insert(i, location) _set_image_size(self.image_proxy.context, self.image_proxy, [location]) def pop(self, i=-1): location = self.value.pop(i) try: self.image_proxy.store_utils.delete_image_location_from_backend( self.image_proxy.context, self.image_proxy.image.image_id, location) except store.exceptions.NotFound: # NOTE(rosmaita): This can happen if the data was deleted by an # operator from the backend, or a race condition from multiple # delete-from-store requests. The old way to deal with this was # that the user could just delete the image when the data is gone, # but with multi-store, that is no longer a good option. So we # intentionally leave the location popped (in other words, the # pop() succeeds) but we also reraise the NotFound so that the # calling code knows what happened. 
with excutils.save_and_reraise_exception(): pass except Exception: with excutils.save_and_reraise_exception(): self.value.insert(i, location) return location def count(self, location): return self.value.count(location) def index(self, location, *args): return self.value.index(location, *args) def remove(self, location): if self.count(location): self.pop(self.index(location)) else: self.value.remove(location) def reverse(self): self.value.reverse() # Mutable sequence, so not hashable __hash__ = None def __getitem__(self, i): return self.value.__getitem__(i) def __setitem__(self, i, location): _check_image_location(self.image_proxy.context, self.image_proxy.store_api, self.image_proxy.store_utils, location) location['status'] = 'active' self.value.__setitem__(i, location) _set_image_size(self.image_proxy.context, self.image_proxy, [location]) def __delitem__(self, i): if isinstance(i, slice): if i.step not in (None, 1): raise NotImplementedError("slice with step") self.__delslice__(i.start, i.stop) return location = None try: location = self.value[i] except Exception: del self.value[i] return self.image_proxy.store_utils.delete_image_location_from_backend( self.image_proxy.context, self.image_proxy.image.image_id, location) del self.value[i] def __delslice__(self, i, j): i = 0 if i is None else max(i, 0) j = len(self) if j is None else max(j, 0) locations = [] try: locations = self.value[i:j] except Exception: del self.value[i:j] return for location in locations: self.image_proxy.store_utils.delete_image_location_from_backend( self.image_proxy.context, self.image_proxy.image.image_id, location) del self.value[i] def __iadd__(self, other): self.extend(other) return self def __contains__(self, location): return location in self.value def __len__(self): return len(self.value) def __cast(self, other): if isinstance(other, StoreLocations): return other.value else: return other def __eq__(self, other): return self.value == self.__cast(other) def __lt__(self, other): return self.value < self.__cast(other) def __iter__(self): return iter(self.value) def __copy__(self): return type(self)(self.image_proxy, self.value) def __deepcopy__(self, memo): # NOTE(zhiyan): Only copy location entries, others can be reused. value = copy.deepcopy(self.value, memo) self.image_proxy.image.locations = value return type(self)(self.image_proxy, value) def _locations_proxy(target, attr): """ Make a location property proxy on the image object. :param target: the image object on which to add the proxy :param attr: the property proxy we want to hook """ def get_attr(self): value = getattr(getattr(self, target), attr) return StoreLocations(self, value) def set_attr(self, value): if not isinstance(value, (list, StoreLocations)): reason = _('Invalid locations') raise exception.BadStoreUri(message=reason) ori_value = getattr(getattr(self, target), attr) if ori_value != value: # NOTE(flwang): If all the URL of passed-in locations are same as # current image locations, that means user would like to only # update the metadata, not the URL. ordered_value = sorted([loc['url'] for loc in value]) ordered_ori = sorted([loc['url'] for loc in ori_value]) if len(ori_value) > 0 and ordered_value != ordered_ori: raise exception.Invalid(_('Original locations is not empty: ' '%s') % ori_value) # NOTE(zhiyan): Check locations are all valid # NOTE(flwang): If all the URL of passed-in locations are same as # current image locations, then it's not necessary to verify those # locations again. 
Otherwise, if there is any restricted scheme in # existing locations. _check_image_location will fail. if ordered_value != ordered_ori: for loc in value: _check_image_location(self.context, self.store_api, self.store_utils, loc) loc['status'] = 'active' if _count_duplicated_locations(value, loc) > 1: raise exception.DuplicateLocation(location=loc['url']) _set_image_size(self.context, getattr(self, target), value) else: for loc in value: loc['status'] = 'active' return setattr(getattr(self, target), attr, list(value)) def del_attr(self): value = getattr(getattr(self, target), attr) while len(value): self.store_utils.delete_image_location_from_backend( self.context, self.image.image_id, value[0]) del value[0] setattr(getattr(self, target), attr, value) return delattr(getattr(self, target), attr) return property(get_attr, set_attr, del_attr) class ImageProxy(glance.domain.proxy.Image): locations = _locations_proxy('image', 'locations') def __init__(self, image, context, store_api, store_utils): self.image = image self.context = context self.store_api = store_api self.store_utils = store_utils proxy_kwargs = { 'context': context, 'image': self, 'store_api': store_api, } super(ImageProxy, self).__init__( image, member_repo_proxy_class=ImageMemberRepoProxy, member_repo_proxy_kwargs=proxy_kwargs) def delete(self): self.image.delete() if self.image.locations: for location in self.image.locations: self.store_utils.delete_image_location_from_backend( self.context, self.image.image_id, location) def _upload_to_store(self, data, verifier, store=None, size=None): """ Upload data to store :param data: data to upload to store :param verifier: for signature verification :param store: store to upload data to :param size: data size :return: """ hashing_algo = self.image.os_hash_algo or CONF['hashing_algorithm'] if CONF.enabled_backends: (location, size, checksum, multihash, loc_meta) = self.store_api.add_with_multihash( CONF, self.image.image_id, utils.LimitingReader(utils.CooperativeReader(data), CONF.image_size_cap), size, store, hashing_algo, context=self.context, verifier=verifier) else: (location, size, checksum, multihash, loc_meta) = self.store_api.add_to_backend_with_multihash( CONF, self.image.image_id, utils.LimitingReader(utils.CooperativeReader(data), CONF.image_size_cap), size, hashing_algo, context=self.context, verifier=verifier) self._verify_signature(verifier, location, loc_meta) for attr, data in {"size": size, "os_hash_value": multihash, "checksum": checksum}.items(): self._verify_uploaded_data(data, attr) self.image.locations.append({'url': location, 'metadata': loc_meta, 'status': 'active'}) self.image.checksum = checksum self.image.os_hash_value = multihash self.image.size = size self.image.os_hash_algo = hashing_algo def _verify_signature(self, verifier, location, loc_meta): """ Verify signature of uploaded data. 
:param verifier: for signature verification """ # NOTE(bpoulos): if verification fails, exception will be raised if verifier is not None: try: verifier.verify() msg = _LI("Successfully verified signature for image %s") LOG.info(msg, self.image.image_id) except crypto_exception.InvalidSignature: if CONF.enabled_backends: self.store_api.delete(location, loc_meta.get('store'), context=self.context) else: self.store_api.delete_from_backend(location, context=self.context) raise cursive_exception.SignatureVerificationError( _('Signature verification failed') ) def _verify_uploaded_data(self, value, attribute_name): """ Verify value of attribute_name uploaded data :param value: value to compare :param attribute_name: attribute name of the image to compare with """ image_value = getattr(self.image, attribute_name) if image_value is not None and value != image_value: msg = _("%s of uploaded data is different from current " "value set on the image.") LOG.error(msg, attribute_name) raise exception.UploadException(msg % attribute_name) def set_data(self, data, size=None, backend=None, set_active=True): if size is None: size = 0 # NOTE(markwash): zero -> unknown size # Create the verifier for signature verification (if correct properties # are present) extra_props = self.image.extra_properties verifier = None if signature_utils.should_create_verifier(extra_props): # NOTE(bpoulos): if creating verifier fails, exception will be # raised img_signature = extra_props[signature_utils.SIGNATURE] hash_method = extra_props[signature_utils.HASH_METHOD] key_type = extra_props[signature_utils.KEY_TYPE] cert_uuid = extra_props[signature_utils.CERT_UUID] verifier = signature_utils.get_verifier( context=self.context, img_signature_certificate_uuid=cert_uuid, img_signature_hash_method=hash_method, img_signature=img_signature, img_signature_key_type=key_type ) if not self.image.virtual_size: inspector = format_inspector.get_inspector(self.image.disk_format) else: # No need to do this again inspector = None if inspector and self.image.container_format == 'bare': fmt = inspector() data = format_inspector.InfoWrapper(data, fmt) LOG.debug('Enabling in-flight format inspection for %s', fmt) else: fmt = None self._upload_to_store(data, verifier, backend, size) virtual_size = 0 if fmt and fmt.format_match: try: virtual_size = fmt.virtual_size LOG.info('Image format matched and virtual size computed: %i', virtual_size) except Exception as e: LOG.error(_LE('Unable to determine virtual_size because: %s'), e) elif fmt: LOG.warning('Image format %s did not match; ' 'unable to calculate virtual size', self.image.disk_format) if virtual_size: self.image.virtual_size = fmt.virtual_size if set_active and self.image.status != 'active': self.image.status = 'active' def get_data(self, offset=0, chunk_size=None): if not self.image.locations: # NOTE(mclaren): This is the only set of arguments # which work with this exception currently, see: # https://bugs.launchpad.net/glance-store/+bug/1501443 # When the above glance_store bug is fixed we can # add a msg as usual. 
raise store.NotFound(image=None) err = None for loc in self.image.locations: try: backend = loc['metadata'].get('store') if CONF.enabled_backends: data, size = self.store_api.get( loc['url'], backend, offset=offset, chunk_size=chunk_size, context=self.context ) else: data, size = self.store_api.get_from_backend( loc['url'], offset=offset, chunk_size=chunk_size, context=self.context) return data except Exception as e: LOG.warning(_LW('Get image %(id)s data failed: ' '%(err)s.'), {'id': self.image.image_id, 'err': encodeutils.exception_to_unicode(e)}) err = e # tried all locations LOG.error(_LE('Glance tried all active locations to get data for ' 'image %s but all have failed.'), self.image.image_id) raise err class ImageMemberRepoProxy(glance.domain.proxy.Repo): def __init__(self, repo, image, context, store_api): self.repo = repo self.image = image self.context = context self.store_api = store_api super(ImageMemberRepoProxy, self).__init__(repo) def _set_acls(self): public = self.image.visibility in ['public', 'community'] if self.image.locations and not public: member_ids = [m.member_id for m in self.repo.list()] for location in self.image.locations: if CONF.enabled_backends: # NOTE(whoami-rajat): Do not set_acls if store is not # defined on this node. This is possible in case of edge # deployment that image location is present but the actual # store is not related to this node. image_store = location['metadata'].get('store') if image_store not in CONF.enabled_backends: msg = (_("Store %s is not available on " "this node, skipping `_set_acls` " "call.") % image_store) LOG.debug(msg) continue self.store_api.set_acls_for_multi_store( location['url'], image_store, public=public, read_tenants=member_ids, context=self.context ) else: self.store_api.set_acls(location['url'], public=public, read_tenants=member_ids, context=self.context) def add(self, member): super(ImageMemberRepoProxy, self).add(member) self._set_acls() def remove(self, member): super(ImageMemberRepoProxy, self).remove(member) self._set_acls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/notifier.py0000664000175000017500000007770100000000000016514 0ustar00zuulzuul00000000000000# Copyright 2011, OpenStack Foundation # Copyright 2012, Red Hat, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import glance_store from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import encodeutils from oslo_utils import excutils import webob from glance.common import exception from glance.common import timeutils from glance.domain import proxy as domain_proxy from glance.i18n import _, _LE notifier_opts = [ cfg.StrOpt('default_publisher_id', default="image.localhost", help=_(""" Default publisher_id for outgoing Glance notifications. This is the value that the notification driver will use to identify messages for events originating from the Glance service. 
Typically, this is the hostname of the instance that generated the message. Possible values: * Any reasonable instance identifier, for example: image.host1 Related options: * None """)), cfg.ListOpt('disabled_notifications', default=[], help=_(""" List of notifications to be disabled. Specify a list of notifications that should not be emitted. A notification can be given either as a notification type to disable a single event notification, or as a notification group prefix to disable all event notifications within a group. Possible values: A comma-separated list of individual notification types or notification groups to be disabled. Currently supported groups: * image * image.member * task * metadef_namespace * metadef_object * metadef_property * metadef_resource_type * metadef_tag For a complete listing and description of each event refer to: https://docs.openstack.org/glance/latest/admin/notifications.html The values must be specified as: . For example: image.create,task.success,metadef_tag Related options: * None """)), ] CONF = cfg.CONF CONF.register_opts(notifier_opts) LOG = logging.getLogger(__name__) def set_defaults(control_exchange='glance'): oslo_messaging.set_transport_defaults(control_exchange) def get_transport(): return oslo_messaging.get_notification_transport(CONF) class Notifier(object): """Uses a notification strategy to send out messages about events.""" def __init__(self): publisher_id = CONF.default_publisher_id self._transport = get_transport() self._notifier = oslo_messaging.Notifier(self._transport, publisher_id=publisher_id) def warn(self, event_type, payload): self._notifier.warn({}, event_type, payload) def info(self, event_type, payload): self._notifier.info({}, event_type, payload) def error(self, event_type, payload): self._notifier.error({}, event_type, payload) def _get_notification_group(notification): return notification.split('.', 1)[0] def _is_notification_enabled(notification): disabled_notifications = CONF.disabled_notifications notification_group = _get_notification_group(notification) notifications = (notification, notification_group) for disabled_notification in disabled_notifications: if disabled_notification in notifications: return False return True def _send_notification(notify, notification_type, payload): if _is_notification_enabled(notification_type): notify(notification_type, payload) def format_image_notification(image): """ Given a glance.domain.Image object, return a dictionary of relevant notification information. We purposely do not include 'location' as it may contain credentials. """ return { 'id': image.image_id, 'name': image.name, 'status': image.status, 'created_at': timeutils.isotime(image.created_at), 'updated_at': timeutils.isotime(image.updated_at), 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'protected': image.protected, 'checksum': image.checksum, 'owner': image.owner, 'disk_format': image.disk_format, 'container_format': image.container_format, 'size': image.size, 'virtual_size': image.virtual_size, 'is_public': image.visibility == 'public', 'visibility': image.visibility, 'properties': dict(image.extra_properties), 'tags': list(image.tags), 'deleted': False, 'deleted_at': None, } def format_image_member_notification(image_member): """Given a glance.domain.ImageMember object, return a dictionary of relevant notification information. 
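    Illustrative payload (editorial addition; the values shown are
    hypothetical, the keys match the dict constructed below):

        {'image_id': 'd5a08e29-...', 'member_id': 'project-a',
         'status': 'pending',
         'created_at': '2024-01-01T00:00:00Z',
         'updated_at': '2024-01-01T00:00:00Z',
         'deleted': False, 'deleted_at': None}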
""" return { 'image_id': image_member.image_id, 'member_id': image_member.member_id, 'status': image_member.status, 'created_at': timeutils.isotime(image_member.created_at), 'updated_at': timeutils.isotime(image_member.updated_at), 'deleted': False, 'deleted_at': None, } def format_task_notification(task): # NOTE(nikhil): input is not passed to the notifier payload as it may # contain sensitive info. return { 'id': task.task_id, 'type': task.type, 'status': task.status, 'result': None, 'owner': task.owner, 'message': None, 'expires_at': timeutils.isotime(task.expires_at), 'created_at': timeutils.isotime(task.created_at), 'updated_at': timeutils.isotime(task.updated_at), 'deleted': False, 'deleted_at': None, } def format_metadef_namespace_notification(metadef_namespace): return { 'namespace': metadef_namespace.namespace, 'namespace_old': metadef_namespace.namespace, 'display_name': metadef_namespace.display_name, 'protected': metadef_namespace.protected, 'visibility': metadef_namespace.visibility, 'owner': metadef_namespace.owner, 'description': metadef_namespace.description, 'created_at': timeutils.isotime(metadef_namespace.created_at), 'updated_at': timeutils.isotime(metadef_namespace.updated_at), 'deleted': False, 'deleted_at': None, } def format_metadef_object_notification(metadef_object): object_properties = metadef_object.properties or {} properties = [] for name, prop in object_properties.items(): object_property = _format_metadef_object_property(name, prop) properties.append(object_property) return { 'namespace': metadef_object.namespace, 'name': metadef_object.name, 'name_old': metadef_object.name, 'properties': properties, 'required': metadef_object.required, 'description': metadef_object.description, 'created_at': timeutils.isotime(metadef_object.created_at), 'updated_at': timeutils.isotime(metadef_object.updated_at), 'deleted': False, 'deleted_at': None, } def _format_metadef_object_property(name, metadef_property): return { 'name': name, 'type': metadef_property.type or None, 'title': metadef_property.title or None, 'description': metadef_property.description or None, 'default': metadef_property.default or None, 'minimum': metadef_property.minimum or None, 'maximum': metadef_property.maximum or None, 'enum': metadef_property.enum or None, 'pattern': metadef_property.pattern or None, 'minLength': metadef_property.minLength or None, 'maxLength': metadef_property.maxLength or None, 'confidential': metadef_property.confidential or None, 'items': metadef_property.items or None, 'uniqueItems': metadef_property.uniqueItems or None, 'minItems': metadef_property.minItems or None, 'maxItems': metadef_property.maxItems or None, 'additionalItems': metadef_property.additionalItems or None, } def format_metadef_property_notification(metadef_property): schema = metadef_property.schema return { 'namespace': metadef_property.namespace, 'name': metadef_property.name, 'name_old': metadef_property.name, 'type': schema.get('type'), 'title': schema.get('title'), 'description': schema.get('description'), 'default': schema.get('default'), 'minimum': schema.get('minimum'), 'maximum': schema.get('maximum'), 'enum': schema.get('enum'), 'pattern': schema.get('pattern'), 'minLength': schema.get('minLength'), 'maxLength': schema.get('maxLength'), 'confidential': schema.get('confidential'), 'items': schema.get('items'), 'uniqueItems': schema.get('uniqueItems'), 'minItems': schema.get('minItems'), 'maxItems': schema.get('maxItems'), 'additionalItems': schema.get('additionalItems'), 'deleted': False, 
'deleted_at': None, } def format_metadef_resource_type_notification(metadef_resource_type): return { 'namespace': metadef_resource_type.namespace, 'name': metadef_resource_type.name, 'name_old': metadef_resource_type.name, 'prefix': metadef_resource_type.prefix, 'properties_target': metadef_resource_type.properties_target, 'created_at': timeutils.isotime(metadef_resource_type.created_at), 'updated_at': timeutils.isotime(metadef_resource_type.updated_at), 'deleted': False, 'deleted_at': None, } def format_metadef_tag_notification(metadef_tag): return { 'namespace': metadef_tag.namespace, 'name': metadef_tag.name, 'name_old': metadef_tag.name, 'created_at': timeutils.isotime(metadef_tag.created_at), 'updated_at': timeutils.isotime(metadef_tag.updated_at), 'deleted': False, 'deleted_at': None, } class NotificationBase(object): def get_payload(self, obj): return {} def send_notification(self, notification_id, obj, extra_payload=None, backend=None): payload = self.get_payload(obj) if extra_payload is not None: payload.update(extra_payload) # update backend information in the notification if backend: payload["backend"] = backend _send_notification(self.notifier.info, notification_id, payload) class NotificationProxy(NotificationBase, metaclass=abc.ABCMeta): def __init__(self, repo, context, notifier): self.repo = repo self.context = context self.notifier = notifier super_class = self.get_super_class() super_class.__init__(self, repo) @abc.abstractmethod def get_super_class(self): pass class NotificationRepoProxy(NotificationBase, metaclass=abc.ABCMeta): def __init__(self, repo, context, notifier): self.repo = repo self.context = context self.notifier = notifier proxy_kwargs = {'context': self.context, 'notifier': self.notifier} proxy_class = self.get_proxy_class() super_class = self.get_super_class() super_class.__init__(self, repo, proxy_class, proxy_kwargs) @abc.abstractmethod def get_super_class(self): pass @abc.abstractmethod def get_proxy_class(self): pass class NotificationFactoryProxy(metaclass=abc.ABCMeta): def __init__(self, factory, context, notifier): kwargs = {'context': context, 'notifier': notifier} proxy_class = self.get_proxy_class() super_class = self.get_super_class() super_class.__init__(self, factory, proxy_class, kwargs) @abc.abstractmethod def get_super_class(self): pass @abc.abstractmethod def get_proxy_class(self): pass class ImageProxy(NotificationProxy, domain_proxy.Image): def get_super_class(self): return domain_proxy.Image def get_payload(self, obj): return format_image_notification(obj) def _format_image_send(self, bytes_sent): return { 'bytes_sent': bytes_sent, 'image_id': self.repo.image_id, 'owner_id': self.repo.owner, 'receiver_tenant_id': self.context.project_id, 'receiver_user_id': self.context.user_id, } def _format_import_properties(self): importing = self.repo.extra_properties.get( 'os_glance_importing_to_stores') importing = importing.split(',') if importing else [] failed = self.repo.extra_properties.get('os_glance_failed_import') failed = failed.split(',') if failed else [] return { 'os_glance_importing_to_stores': importing, 'os_glance_failed_import': failed } def _get_chunk_data_iterator(self, data, chunk_size=None): sent = 0 for chunk in data: yield chunk sent += len(chunk) if sent != (chunk_size or self.repo.size): notify = self.notifier.error else: notify = self.notifier.info try: _send_notification(notify, 'image.send', self._format_image_send(sent)) except Exception as err: msg = (_LE("An error occurred during image.send" " notification: %(err)s") 
% {'err': err}) LOG.error(msg) def get_data(self, offset=0, chunk_size=None): # Due to the need of evaluating subsequent proxies, this one # should return a generator, the call should be done before # generator creation data = self.repo.get_data(offset=offset, chunk_size=chunk_size) return self._get_chunk_data_iterator(data, chunk_size=chunk_size) def set_data(self, data, size=None, backend=None, set_active=True): self.send_notification('image.prepare', self.repo, backend=backend, extra_payload=self._format_import_properties()) notify_error = self.notifier.error status = self.repo.status try: self.repo.set_data(data, size, backend=backend, set_active=set_active) except glance_store.StorageFull as e: msg = (_("Image storage media is full: %s") % encodeutils.exception_to_unicode(e)) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) except glance_store.StorageWriteDenied as e: msg = (_("Insufficient permissions on image storage media: %s") % encodeutils.exception_to_unicode(e)) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPServiceUnavailable(explanation=msg) except ValueError as e: msg = (_("Cannot save data for image %(image_id)s: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) except exception.Duplicate as e: msg = (_("Unable to upload duplicate image data for image " "%(image_id)s: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPConflict(explanation=msg) except exception.Forbidden as e: msg = (_("Not allowed to upload image data for image %(image_id)s:" " %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPForbidden(explanation=msg) except exception.NotFound as e: exc_str = encodeutils.exception_to_unicode(e) msg = (_("Image %(image_id)s could not be found after upload." 
" The image may have been deleted during the upload:" " %(error)s") % {'image_id': self.repo.image_id, 'error': exc_str}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPNotFound(explanation=exc_str) except webob.exc.HTTPError as e: with excutils.save_and_reraise_exception(): msg = (_("Failed to upload image data for image %(image_id)s" " due to HTTP error: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_("Failed to upload image data for image %(image_id)s " "due to internal error: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) else: extra_payload = self._format_import_properties() self.send_notification('image.upload', self.repo, extra_payload=extra_payload) if set_active and status != 'active': self.send_notification('image.activate', self.repo) class ImageMemberProxy(NotificationProxy, domain_proxy.ImageMember): def get_super_class(self): return domain_proxy.ImageMember class ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory): def get_super_class(self): return domain_proxy.ImageFactory def get_proxy_class(self): return ImageProxy class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo): def get_super_class(self): return domain_proxy.Repo def get_proxy_class(self): return ImageProxy def get_payload(self, obj): return format_image_notification(obj) def save(self, image, from_state=None): super(ImageRepoProxy, self).save(image, from_state=from_state) self.send_notification('image.update', image) def add(self, image): super(ImageRepoProxy, self).add(image) self.send_notification('image.create', image) def remove(self, image): super(ImageRepoProxy, self).remove(image) self.send_notification('image.delete', image, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime() }) class ImageMemberRepoProxy(NotificationBase, domain_proxy.MemberRepo): def __init__(self, repo, image, context, notifier): self.repo = repo self.image = image self.context = context self.notifier = notifier proxy_kwargs = {'context': self.context, 'notifier': self.notifier} proxy_class = self.get_proxy_class() super_class = self.get_super_class() super_class.__init__(self, image, repo, proxy_class, proxy_kwargs) def get_super_class(self): return domain_proxy.MemberRepo def get_proxy_class(self): return ImageMemberProxy def get_payload(self, obj): return format_image_member_notification(obj) def save(self, member, from_state=None): super(ImageMemberRepoProxy, self).save(member, from_state=from_state) self.send_notification('image.member.update', member) def add(self, member): super(ImageMemberRepoProxy, self).add(member) self.send_notification('image.member.create', member) def remove(self, member): super(ImageMemberRepoProxy, self).remove(member) self.send_notification('image.member.delete', member, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime() }) class TaskProxy(NotificationProxy, domain_proxy.Task): def get_super_class(self): return domain_proxy.Task def get_payload(self, obj): return format_task_notification(obj) def begin_processing(self): super(TaskProxy, self).begin_processing() self.send_notification('task.processing', self.repo) def succeed(self, result): super(TaskProxy, self).succeed(result) self.send_notification('task.success', self.repo) def fail(self, 
message): super(TaskProxy, self).fail(message) self.send_notification('task.failure', self.repo) def run(self, executor): super(TaskProxy, self).run(executor) self.send_notification('task.run', self.repo) class TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory): def get_super_class(self): return domain_proxy.TaskFactory def get_proxy_class(self): return TaskProxy class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo): def get_super_class(self): return domain_proxy.TaskRepo def get_proxy_class(self): return TaskProxy def get_payload(self, obj): return format_task_notification(obj) def add(self, task): result = super(TaskRepoProxy, self).add(task) self.send_notification('task.create', task) return result def remove(self, task): result = super(TaskRepoProxy, self).remove(task) self.send_notification('task.delete', task, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime() }) return result class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub): def get_super_class(self): return domain_proxy.TaskStub class TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo): def get_super_class(self): return domain_proxy.TaskStubRepo def get_proxy_class(self): return TaskStubProxy class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace): def get_super_class(self): return domain_proxy.MetadefNamespace class MetadefNamespaceFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefNamespaceFactory): def get_super_class(self): return domain_proxy.MetadefNamespaceFactory def get_proxy_class(self): return MetadefNamespaceProxy class MetadefNamespaceRepoProxy(NotificationRepoProxy, domain_proxy.MetadefNamespaceRepo): def get_super_class(self): return domain_proxy.MetadefNamespaceRepo def get_proxy_class(self): return MetadefNamespaceProxy def get_payload(self, obj): return format_metadef_namespace_notification(obj) def save(self, metadef_namespace): name = getattr(metadef_namespace, '_old_namespace', metadef_namespace.namespace) result = super(MetadefNamespaceRepoProxy, self).save(metadef_namespace) self.send_notification( 'metadef_namespace.update', metadef_namespace, extra_payload={ 'namespace_old': name, }) return result def add(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).add(metadef_namespace) self.send_notification('metadef_namespace.create', metadef_namespace) return result def remove(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove( metadef_namespace) self.send_notification( 'metadef_namespace.delete', metadef_namespace, extra_payload={'deleted': True, 'deleted_at': timeutils.isotime()} ) return result def remove_objects(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove_objects( metadef_namespace) self.send_notification('metadef_namespace.delete_objects', metadef_namespace) return result def remove_properties(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove_properties( metadef_namespace) self.send_notification('metadef_namespace.delete_properties', metadef_namespace) return result def remove_tags(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove_tags( metadef_namespace) self.send_notification('metadef_namespace.delete_tags', metadef_namespace) return result class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject): def get_super_class(self): return domain_proxy.MetadefObject class MetadefObjectFactoryProxy(NotificationFactoryProxy, 
domain_proxy.MetadefObjectFactory): def get_super_class(self): return domain_proxy.MetadefObjectFactory def get_proxy_class(self): return MetadefObjectProxy class MetadefObjectRepoProxy(NotificationRepoProxy, domain_proxy.MetadefObjectRepo): def get_super_class(self): return domain_proxy.MetadefObjectRepo def get_proxy_class(self): return MetadefObjectProxy def get_payload(self, obj): return format_metadef_object_notification(obj) def save(self, metadef_object): name = getattr(metadef_object, '_old_name', metadef_object.name) result = super(MetadefObjectRepoProxy, self).save(metadef_object) self.send_notification( 'metadef_object.update', metadef_object, extra_payload={ 'namespace': metadef_object.namespace.namespace, 'name_old': name, }) return result def add(self, metadef_object): result = super(MetadefObjectRepoProxy, self).add(metadef_object) self.send_notification('metadef_object.create', metadef_object) return result def remove(self, metadef_object): result = super(MetadefObjectRepoProxy, self).remove(metadef_object) self.send_notification( 'metadef_object.delete', metadef_object, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': metadef_object.namespace.namespace } ) return result class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty): def get_super_class(self): return domain_proxy.MetadefProperty class MetadefPropertyFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefPropertyFactory): def get_super_class(self): return domain_proxy.MetadefPropertyFactory def get_proxy_class(self): return MetadefPropertyProxy class MetadefPropertyRepoProxy(NotificationRepoProxy, domain_proxy.MetadefPropertyRepo): def get_super_class(self): return domain_proxy.MetadefPropertyRepo def get_proxy_class(self): return MetadefPropertyProxy def get_payload(self, obj): return format_metadef_property_notification(obj) def save(self, metadef_property): name = getattr(metadef_property, '_old_name', metadef_property.name) result = super(MetadefPropertyRepoProxy, self).save(metadef_property) self.send_notification( 'metadef_property.update', metadef_property, extra_payload={ 'namespace': metadef_property.namespace.namespace, 'name_old': name, }) return result def add(self, metadef_property): result = super(MetadefPropertyRepoProxy, self).add(metadef_property) self.send_notification('metadef_property.create', metadef_property) return result def remove(self, metadef_property): result = super(MetadefPropertyRepoProxy, self).remove(metadef_property) self.send_notification( 'metadef_property.delete', metadef_property, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': metadef_property.namespace.namespace } ) return result class MetadefResourceTypeProxy(NotificationProxy, domain_proxy.MetadefResourceType): def get_super_class(self): return domain_proxy.MetadefResourceType class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefResourceTypeFactory): def get_super_class(self): return domain_proxy.MetadefResourceTypeFactory def get_proxy_class(self): return MetadefResourceTypeProxy class MetadefResourceTypeRepoProxy(NotificationRepoProxy, domain_proxy.MetadefResourceTypeRepo): def get_super_class(self): return domain_proxy.MetadefResourceTypeRepo def get_proxy_class(self): return MetadefResourceTypeProxy def get_payload(self, obj): return format_metadef_resource_type_notification(obj) def add(self, md_resource_type): result = super(MetadefResourceTypeRepoProxy, self).add( md_resource_type) 
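        # Persist the association through the wrapped repo first; the
        # 'metadef_resource_type.create' notification that follows is only
        # emitted if that add() call returns without raising.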
self.send_notification('metadef_resource_type.create', md_resource_type) return result def remove(self, md_resource_type): result = super(MetadefResourceTypeRepoProxy, self).remove( md_resource_type) self.send_notification( 'metadef_resource_type.delete', md_resource_type, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': md_resource_type.namespace.namespace } ) return result class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag): def get_super_class(self): return domain_proxy.MetadefTag class MetadefTagFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefTagFactory): def get_super_class(self): return domain_proxy.MetadefTagFactory def get_proxy_class(self): return MetadefTagProxy class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo): def get_super_class(self): return domain_proxy.MetadefTagRepo def get_proxy_class(self): return MetadefTagProxy def get_payload(self, obj): return format_metadef_tag_notification(obj) def save(self, metadef_tag): name = getattr(metadef_tag, '_old_name', metadef_tag.name) result = super(MetadefTagRepoProxy, self).save(metadef_tag) self.send_notification( 'metadef_tag.update', metadef_tag, extra_payload={ 'namespace': metadef_tag.namespace.namespace, 'name_old': name, }) return result def add(self, metadef_tag): result = super(MetadefTagRepoProxy, self).add(metadef_tag) self.send_notification('metadef_tag.create', metadef_tag) return result def add_tags(self, metadef_tags, can_append=False): result = super(MetadefTagRepoProxy, self).add_tags(metadef_tags, can_append) for metadef_tag in metadef_tags: self.send_notification('metadef_tag.create', metadef_tag) return result def remove(self, metadef_tag): result = super(MetadefTagRepoProxy, self).remove(metadef_tag) self.send_notification( 'metadef_tag.delete', metadef_tag, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': metadef_tag.namespace.namespace } ) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/opts.py0000664000175000017500000001072600000000000015654 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
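# NOTE: a minimal usage sketch (not part of the original module; names are
# taken from the functions defined below): each list_*_opts() helper returns
# (group, opts) tuples, the same shape consumed by the 'oslo_config.opts'
# entry points, so they can also be registered directly with oslo.config:
#
#     from oslo_config import cfg
#
#     import glance.opts
#
#     conf = cfg.ConfigOpts()
#     for group, opts in glance.opts.list_api_opts():
#         conf.register_opts(opts, group=group)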
__all__ = [ 'list_api_opts', 'list_scrubber_opts', 'list_cache_opts', 'list_manage_opts', 'list_image_import_opts', ] import copy import itertools from osprofiler import opts as profiler import glance.api.middleware.context import glance.api.versions import glance.async_.flows._internal_plugins import glance.async_.flows.api_image_import import glance.async_.flows.convert from glance.async_.flows.plugins import plugin_opts import glance.async_.taskflow_executor import glance.common.config import glance.common.property_utils import glance.common.wsgi import glance.image_cache import glance.image_cache.drivers.sqlite import glance.notifier import glance.scrubber _api_opts = [ (None, list(itertools.chain( glance.api.middleware.context.context_opts, glance.api.versions.versions_opts, glance.common.config.common_opts, glance.common.property_utils.property_opts, glance.common.wsgi.bind_opts, glance.common.wsgi.eventlet_opts, glance.common.wsgi.socket_opts, glance.common.wsgi.store_opts, glance.common.wsgi.cli_opts, glance.image_cache.drivers.sqlite.sqlite_opts, glance.image_cache.image_cache_opts, glance.notifier.notifier_opts, glance.scrubber.scrubber_opts))), ('image_format', glance.common.config.image_format_opts), ('task', glance.common.config.task_opts), ('taskflow_executor', list(itertools.chain( glance.async_.taskflow_executor.taskflow_executor_opts, glance.async_.flows.convert.convert_task_opts))), profiler.list_opts()[0], ('paste_deploy', glance.common.config.paste_deploy_opts), ('wsgi', glance.common.config.wsgi_opts), ] _scrubber_opts = [ (None, list(itertools.chain( glance.common.config.common_opts, glance.scrubber.scrubber_opts, glance.scrubber.scrubber_cmd_opts, glance.scrubber.scrubber_cmd_cli_opts))), ] _cache_opts = [ (None, list(itertools.chain( glance.common.config.common_opts, glance.image_cache.drivers.sqlite.sqlite_opts, glance.image_cache.image_cache_opts))), ] _manage_opts = [ (None, []) ] _image_import_opts = [ ('image_import_opts', glance.async_.flows.api_image_import.api_import_opts), ('import_filtering_opts', glance.async_.flows._internal_plugins.import_filtering_opts), ('glance_download_opts', glance.async_.flows.api_image_import.glance_download_opts) ] def list_api_opts(): """Return a list of oslo_config options available in Glance API service. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. This function is also discoverable via the 'glance.api' entry point under the 'oslo_config.opts' namespace. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by Glance. :returns: a list of (group_name, opts) tuples """ return [(g, copy.deepcopy(o)) for g, o in _api_opts] def list_scrubber_opts(): """Return a list of oslo_config options available in Glance Scrubber service. """ return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts] def list_cache_opts(): """Return a list of oslo_config options available in Glance Cache service. 
""" return [(g, copy.deepcopy(o)) for g, o in _cache_opts] def list_manage_opts(): """Return a list of oslo_config options available in Glance manage.""" return [(g, copy.deepcopy(o)) for g, o in _manage_opts] def list_image_import_opts(): """Return a list of oslo_config options available for Image Import""" opts = copy.deepcopy(_image_import_opts) opts.extend(plugin_opts.get_plugin_opts()) return [(g, copy.deepcopy(o)) for g, o in opts] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8823059 glance-29.0.0/glance/policies/0000775000175000017500000000000000000000000016116 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/__init__.py0000664000175000017500000000200300000000000020222 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from glance.policies import base from glance.policies import cache from glance.policies import discovery from glance.policies import image from glance.policies import metadef from glance.policies import tasks def list_rules(): return itertools.chain( base.list_rules(), image.list_rules(), tasks.list_rules(), metadef.list_rules(), cache.list_rules(), discovery.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/base.py0000664000175000017500000001247400000000000017412 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy # Generic check string for checking if a user is authorized on a particular # project, specifically with the member role. PROJECT_MEMBER = 'role:member and project_id:%(project_id)s' # Generic check string for checking if a user is authorized on a particular # project but with read-only access. For example, this persona would be able to # list private images owned by a project but cannot make any writeable changes # to those images. PROJECT_READER = 'role:reader and project_id:%(project_id)s' # Make sure the member_id of the supplied target matches the project_id from # the context object, which is derived from keystone tokens. 
IMAGE_MEMBER_CHECK = 'project_id:%(member_id)s'

# Check if the visibility of the image supplied in the target matches
# "community"
COMMUNITY_VISIBILITY_CHECK = "'community':%(visibility)s"

# Check if the visibility of the resource supplied in the target matches
# "public"
PUBLIC_VISIBILITY_CHECK = "'public':%(visibility)s"

# Check if the visibility of the image supplied in the target matches "shared"
SHARED_VISIBILITY_CHECK = "'shared':%(visibility)s"

PROJECT_MEMBER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED = (
    f'role:member and (project_id:%(project_id)s or {IMAGE_MEMBER_CHECK} '
    f'or {COMMUNITY_VISIBILITY_CHECK} or {PUBLIC_VISIBILITY_CHECK} '
    f'or {SHARED_VISIBILITY_CHECK})'
)

PROJECT_READER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED = (
    f'role:reader and (project_id:%(project_id)s or {IMAGE_MEMBER_CHECK} '
    f'or {COMMUNITY_VISIBILITY_CHECK} or {PUBLIC_VISIBILITY_CHECK} '
    f'or {SHARED_VISIBILITY_CHECK})'
)

PROJECT_READER_OR_PUBLIC_NAMESPACE = (
    f'role:reader and (project_id:%(project_id)s or {PUBLIC_VISIBILITY_CHECK})'
)

# FIXME(lbragstad): These are composite check strings that represent glance's
# authorization code, some of which is implemented in the authorization
# wrapper and some in the database driver.
#
# These check strings do not support tenancy with the `admin` role. This
# means anyone with the `admin` role on any project can execute a policy,
# which is typical in OpenStack services. The following check strings,
# however, offer formal support for project membership and a read-only
# variant consistent with other OpenStack services.
ADMIN = 'rule:context_is_admin'
DEFAULT = 'rule:default'
ADMIN_OR_PROJECT_MEMBER = f'{ADMIN} or ({PROJECT_MEMBER})'
ADMIN_OR_PROJECT_READER = f'{ADMIN} or ({PROJECT_READER})'
ADMIN_OR_PROJECT_READER_GET_IMAGE = (
    f'{ADMIN} or '
    f'({PROJECT_READER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED})'
)
ADMIN_OR_PROJECT_MEMBER_DOWNLOAD_IMAGE = (
    f'{ADMIN} or '
    f'({PROJECT_MEMBER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED})'
)
ADMIN_OR_PROJECT_MEMBER_CREATE_IMAGE = (
    f'{ADMIN} or ({PROJECT_MEMBER} and project_id:%(owner)s)'
)
ADMIN_OR_PROJECT_READER_GET_NAMESPACE = (
    f'{ADMIN} or ({PROJECT_READER_OR_PUBLIC_NAMESPACE})'
)
ADMIN_OR_SHARED_MEMBER = (
    f'{ADMIN} or (role:member and {IMAGE_MEMBER_CHECK})'
)
ADMIN_OR_PROJECT_READER_OR_SHARED_MEMBER = (
    f'{ADMIN} or '
    f'role:reader and (project_id:%(project_id)s or {IMAGE_MEMBER_CHECK})'
)
SERVICE_OR_PROJECT_MEMBER = (
    f'rule:service_api or ({PROJECT_MEMBER} and project_id:%(owner)s)'
)
SERVICE = 'rule:service_api'

rules = [
    policy.RuleDefault(name='default',
                       check_str='',
                       description='Defines the default rule used for '
                                   'policies that historically had an empty '
                                   'policy in the supplied policy.json file.',
                       deprecated_rule=policy.DeprecatedRule(
                           name='default',
                           check_str=ADMIN,
                           deprecated_reason='In order to allow operators to '
                           'accept the default policies from code by not '
                           'defining them in the policy file, while still '
                           'working with old policy files that rely on the '
                           '``default`` rule for policies that are '
                           'not specified in the policy file, the ``default`` '
                           'rule must now be explicitly set to '
                           '``"role:admin"`` when that is the desired default '
                           'for unspecified rules.',
                           deprecated_since='Ussuri')),
    policy.RuleDefault(name='context_is_admin',
                       check_str='role:admin',
                       description='Defines the rule for the is_admin:True '
                                   'check.'),
    policy.RuleDefault(name='service_api',
                       check_str='role:service',
                       description='Default rule for the service-to-service '
                                   'API.'),
]


def list_rules():
    return
rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/cache.py0000664000175000017500000000460200000000000017535 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from glance.policies import base DEPRECATED_REASON = """ The image API now supports roles. """ cache_policies = [ policy.DocumentedRuleDefault( name="cache_image", check_str=base.ADMIN, scope_types=['project'], description='Queue image for caching', operations=[ {'path': '/v2/cache/{image_id}', 'method': 'PUT'} ], deprecated_rule=policy.DeprecatedRule( name="cache_image", check_str="rule:manage_image_cache", deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="cache_list", check_str=base.ADMIN, scope_types=['project'], description='List cache status', operations=[ {'path': '/v2/cache', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="cache_list", check_str="rule:manage_image_cache", deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="cache_delete", check_str=base.ADMIN, scope_types=['project'], description='Delete image(s) from cache and/or queue', operations=[ {'path': '/v2/cache', 'method': 'DELETE'}, {'path': '/v2/cache/{image_id}', 'method': 'DELETE'} ], deprecated_rule=policy.DeprecatedRule( name="cache_delete", check_str="rule:manage_image_cache", deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), ] def list_rules(): return cache_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/discovery.py0000664000175000017500000000206200000000000020477 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
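# NOTE: the check string for 'stores_info_detail' below defaults to
# admin-only; as with any oslo.policy rule, an operator can override it in
# policy.yaml. A hypothetical override (illustrative only, not from the
# original source) that also grants the service role access:
#
#     "stores_info_detail": "role:admin or role:service"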
from oslo_policy import policy from glance.policies import base discovery_policies = [ policy.DocumentedRuleDefault( name="stores_info_detail", check_str=base.ADMIN, scope_types=['project'], description='Expose store specific information', operations=[ {'path': '/v2/info/stores/detail', 'method': 'GET'} ] ), ] def list_rules(): return discovery_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/image.py0000664000175000017500000002732700000000000017565 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from glance.policies import base DEPRECATED_REASON = """ The image API now supports roles. """ image_policies = [ policy.DocumentedRuleDefault( name="add_image", check_str=base.ADMIN_OR_PROJECT_MEMBER_CREATE_IMAGE, scope_types=['project'], description='Create new image', operations=[ {'path': '/v2/images', 'method': 'POST'} ], deprecated_rule=policy.DeprecatedRule( name="add_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY) ), policy.DocumentedRuleDefault( name="delete_image", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Deletes the image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'DELETE'} ], deprecated_rule=policy.DeprecatedRule( name="delete_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="get_image", check_str=base.ADMIN_OR_PROJECT_READER_GET_IMAGE, scope_types=['project'], description='Get specified image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="get_images", check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description='Get all available images', operations=[ {'path': '/v2/images', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_images", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="modify_image", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Updates given image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'PATCH'} ], deprecated_rule=policy.DeprecatedRule( name="modify_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="publicize_image", check_str=base.ADMIN, scope_types=['project'], description='Publicize given image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'PATCH'} ] ), policy.DocumentedRuleDefault( name="communitize_image", 
check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Communitize given image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'PATCH'} ], deprecated_rule=policy.DeprecatedRule( name="communitize_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="download_image", check_str=base.ADMIN_OR_PROJECT_MEMBER_DOWNLOAD_IMAGE, scope_types=['project'], description='Downloads given image', operations=[ {'path': '/v2/images/{image_id}/file', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="download_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="upload_image", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Uploads data to specified image', operations=[ {'path': '/v2/images/{image_id}/file', 'method': 'PUT'} ], deprecated_rule=policy.DeprecatedRule( name="upload_image", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="delete_image_location", check_str=base.ADMIN, scope_types=['project'], description='Deletes the location of given image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'PATCH'} ], deprecated_rule=policy.DeprecatedRule( name="delete_image_location", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="get_image_location", check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description='Reads the location of the image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_image_location", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="set_image_location", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Sets location URI to given image', operations=[ {'path': '/v2/images/{image_id}', 'method': 'PATCH'} ], deprecated_rule=policy.DeprecatedRule( name="set_image_location", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="add_image_location", check_str=base.SERVICE_OR_PROJECT_MEMBER, scope_types=['project'], description='Add location URI to given image', operations=[ {'path': '/v2/images/{image_id}/locations', 'method': 'POST'} ], ), policy.DocumentedRuleDefault( name="fetch_image_location", check_str=base.SERVICE, scope_types=['project'], description='Show all locations associated to given image', operations=[ {'path': '/v2/images/{image_id}/locations', 'method': 'GET'} ], ), policy.DocumentedRuleDefault( name="add_member", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Create image member', operations=[ {'path': '/v2/images/{image_id}/members', 'method': 'POST'} ], deprecated_rule=policy.DeprecatedRule( name="add_member", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="delete_member", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Delete image member', operations=[ {'path': '/v2/images/{image_id}/members/{member_id}', 
'method': 'DELETE'} ], deprecated_rule=policy.DeprecatedRule( name="delete_member", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="get_member", check_str=base.ADMIN_OR_PROJECT_READER_OR_SHARED_MEMBER, scope_types=['project'], description='Show image member details', operations=[ {'path': '/v2/images/{image_id}/members/{member_id}', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_member", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="get_members", check_str=base.ADMIN_OR_PROJECT_READER_OR_SHARED_MEMBER, scope_types=['project'], description='List image members', operations=[ {'path': '/v2/images/{image_id}/members', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_members", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="modify_member", check_str=base.ADMIN_OR_SHARED_MEMBER, scope_types=['project'], description='Update image member', operations=[ {'path': '/v2/images/{image_id}/members/{member_id}', 'method': 'PUT'} ], deprecated_rule=policy.DeprecatedRule( name="modify_member", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.RuleDefault( name="manage_image_cache", check_str=base.ADMIN, scope_types=['project'], description='Manage image cache' ), policy.DocumentedRuleDefault( name="deactivate", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Deactivate image', operations=[ {'path': '/v2/images/{image_id}/actions/deactivate', 'method': 'POST'} ], deprecated_rule=policy.DeprecatedRule( name="deactivate", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="reactivate", check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description='Reactivate image', operations=[ {'path': '/v2/images/{image_id}/actions/reactivate', 'method': 'POST'} ], deprecated_rule=policy.DeprecatedRule( name="reactivate", check_str=base.DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY), ), policy.DocumentedRuleDefault( name="copy_image", check_str=base.ADMIN, # For now this is restricted to project-admins. # That might change in the future if we decide to push # this functionality down to project-members. scope_types=['project'], description='Copy existing image to other stores', operations=[ {'path': '/v2/images/{image_id}/import', 'method': 'POST'} ] ), ] def list_rules(): return image_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/metadef.py0000664000175000017500000003076000000000000020103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from glance.policies import base DEPRECATED_REASON = """ The metadata API now supports project scope and default roles. """ METADEF_ADMIN = "rule:metadef_admin" METADEF_DEFAULT = "rule:metadef_default" metadef_policies = [ policy.RuleDefault(name="metadef_default", check_str=""), policy.RuleDefault(name="metadef_admin", check_str=base.ADMIN), policy.DocumentedRuleDefault( name="get_metadef_namespace", check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE, scope_types=['project'], description="Get a specific namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_metadef_namespace", check_str=METADEF_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="get_metadef_namespaces", check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="List namespace.", operations=[ {'path': '/v2/metadefs/namespaces', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_metadef_namespaces", check_str=METADEF_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="modify_metadef_namespace", check_str=METADEF_ADMIN, scope_types=['project'], description="Modify an existing namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}', 'method': 'PUT'} ], ), policy.DocumentedRuleDefault( name="add_metadef_namespace", check_str=METADEF_ADMIN, scope_types=['project'], description="Create a namespace.", operations=[ {'path': '/v2/metadefs/namespaces', 'method': 'POST'} ], ), policy.DocumentedRuleDefault( name="delete_metadef_namespace", check_str=METADEF_ADMIN, scope_types=['project'], description="Delete a namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}', 'method': 'DELETE'} ], ), policy.DocumentedRuleDefault( name="get_metadef_object", check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE, scope_types=['project'], description="Get a specific object from a namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/objects' '/{object_name}', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_metadef_object", check_str=METADEF_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="get_metadef_objects", check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE, scope_types=['project'], description="Get objects from a namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/objects', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_metadef_objects", check_str=METADEF_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="modify_metadef_object", check_str=METADEF_ADMIN, scope_types=['project'], description="Update an object within a namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/objects' '/{object_name}', 'method': 'PUT'} ], ), policy.DocumentedRuleDefault( name="add_metadef_object", check_str=METADEF_ADMIN, scope_types=['project'], description="Create an object within a namespace.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/objects', 'method': 'POST'} ], 
    ),
    policy.DocumentedRuleDefault(
        name="delete_metadef_object",
        check_str=METADEF_ADMIN,
        scope_types=['project'],
        description="Delete an object within a namespace.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/objects'
                     '/{object_name}',
             'method': 'DELETE'}
        ],
    ),
    policy.DocumentedRuleDefault(
        name="list_metadef_resource_types",
        check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE,
        scope_types=['project'],
        description="List meta definition resource types.",
        operations=[
            {'path': '/v2/metadefs/resource_types',
             'method': 'GET'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="list_metadef_resource_types",
            check_str=METADEF_DEFAULT,
            deprecated_reason=DEPRECATED_REASON,
            deprecated_since=versionutils.deprecated.XENA
        ),
    ),
    policy.DocumentedRuleDefault(
        name="get_metadef_resource_type",
        check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE,
        scope_types=['project'],
        description="Get meta definition resource type associations.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/resource_types',
             'method': 'GET'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="get_metadef_resource_type",
            check_str=METADEF_DEFAULT,
            deprecated_reason=DEPRECATED_REASON,
            deprecated_since=versionutils.deprecated.XENA
        ),
    ),
    policy.DocumentedRuleDefault(
        name="add_metadef_resource_type_association",
        check_str=METADEF_ADMIN,
        scope_types=['project'],
        description="Create a meta definition resource type association.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/resource_types',
             'method': 'POST'}
        ],
    ),
    policy.DocumentedRuleDefault(
        name="remove_metadef_resource_type_association",
        check_str=METADEF_ADMIN,
        scope_types=['project'],
        description="Delete a meta definition resource type association.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/resource_types'
                     '/{name}',
             'method': 'DELETE'}
        ],
    ),
    policy.DocumentedRuleDefault(
        name="get_metadef_property",
        check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE,
        scope_types=['project'],
        description="Get a specific meta definition property.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/properties'
                     '/{property_name}',
             'method': 'GET'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="get_metadef_property",
            check_str=METADEF_DEFAULT,
            deprecated_reason=DEPRECATED_REASON,
            deprecated_since=versionutils.deprecated.XENA
        ),
    ),
    policy.DocumentedRuleDefault(
        name="get_metadef_properties",
        check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE,
        scope_types=['project'],
        description="List meta definition properties.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/properties',
             'method': 'GET'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="get_metadef_properties",
            check_str=METADEF_DEFAULT,
            deprecated_reason=DEPRECATED_REASON,
            deprecated_since=versionutils.deprecated.XENA
        ),
    ),
    policy.DocumentedRuleDefault(
        name="modify_metadef_property",
        check_str=METADEF_ADMIN,
        scope_types=['project'],
        description="Update meta definition property.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/properties'
                     '/{property_name}',
             'method': 'PUT'}
        ],
    ),
    policy.DocumentedRuleDefault(
        name="add_metadef_property",
        check_str=METADEF_ADMIN,
        scope_types=['project'],
        description="Create meta definition property.",
        operations=[
            {'path': '/v2/metadefs/namespaces/{namespace_name}/properties',
             'method': 'POST'}
        ],
    ),
    policy.DocumentedRuleDefault(
        name="remove_metadef_property",
        check_str=METADEF_ADMIN,
        scope_types=['project'],
        description="Delete meta definition property.",
        operations=[
            {'path':
'/v2/metadefs/namespaces/{namespace_name}/properties' '/{property_name}', 'method': 'DELETE'} ], ), policy.DocumentedRuleDefault( name="get_metadef_tag", check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE, scope_types=['project'], description="Get tag definition.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags' '/{tag_name}', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_metadef_tag", check_str=METADEF_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="get_metadef_tags", check_str=base.ADMIN_OR_PROJECT_READER_GET_NAMESPACE, scope_types=['project'], description="List tag definitions.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags', 'method': 'GET'} ], deprecated_rule=policy.DeprecatedRule( name="get_metadef_tags", check_str=METADEF_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.XENA ), ), policy.DocumentedRuleDefault( name="modify_metadef_tag", check_str=METADEF_ADMIN, scope_types=['project'], description="Update tag definition.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags' '/{tag_name}', 'method': 'PUT'} ], ), policy.DocumentedRuleDefault( name="add_metadef_tag", check_str=METADEF_ADMIN, scope_types=['project'], description="Add tag definition.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags' '/{tag_name}', 'method': 'POST'} ], ), policy.DocumentedRuleDefault( name="add_metadef_tags", check_str=METADEF_ADMIN, scope_types=['project'], description="Create tag definitions.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags', 'method': 'POST'} ], ), policy.DocumentedRuleDefault( name="delete_metadef_tag", check_str=METADEF_ADMIN, scope_types=['project'], description="Delete tag definition.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags' '/{tag_name}', 'method': 'DELETE'} ], ), policy.DocumentedRuleDefault( name="delete_metadef_tags", check_str=METADEF_ADMIN, scope_types=['project'], description="Delete tag definitions.", operations=[ {'path': '/v2/metadefs/namespaces/{namespace_name}/tags', 'method': 'DELETE'} ], ) ] def list_rules(): return metadef_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/policies/tasks.py0000664000175000017500000001057600000000000017626 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from glance.policies import base TASK_DESCRIPTION = """ This granular policy controls access to tasks, both from the tasks API as well as internal locations in Glance that use tasks (like import). Practically this cannot be more restrictive than the policy that controls import or things will break, and changing it from the default is almost certainly not what you want. 
Access to the external tasks API should be restricted as desired by the
tasks_api_access policy. This may change in the future.
"""

MODIFY_TASK_DEPRECATION = """
This policy check has never been honored by the API. It will be removed in
a future release.
"""

TASK_ACCESS_DESCRIPTION = """
This is a generic blanket policy for protecting all task APIs. It is not
granular and will not allow you to separate writable and readable task
operations into different roles.
"""

DEPRECATION_REASON = """
Since the Xena release, policy checks are enforced in the API layer, and
enforcement of the individual task policies at the policy layer will be
removed. Because the task APIs are already deprecated and
`tasks_api_access` is checked for each of them at the API layer, the other
task-related policies no longer provide any benefit.
"""

task_policies = [
    policy.DocumentedRuleDefault(
        name="get_task",
        # All policies except tasks_api_access are internal policies that are
        # only called by glance as a result of some other operation.
        check_str=base.DEFAULT,
        scope_types=['project'],
        description='Get an image task.\n' + TASK_DESCRIPTION,
        operations=[
            {'path': '/v2/tasks/{task_id}',
             'method': 'GET'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="get_task", check_str=base.DEFAULT,
            deprecated_reason=DEPRECATION_REASON,
            deprecated_since=versionutils.deprecated.XENA)
    ),
    policy.DocumentedRuleDefault(
        name="get_tasks",
        check_str=base.DEFAULT,
        scope_types=['project'],
        description='List tasks for all images.\n' + TASK_DESCRIPTION,
        operations=[
            {'path': '/v2/tasks',
             'method': 'GET'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="get_tasks", check_str=base.DEFAULT,
            deprecated_reason=DEPRECATION_REASON,
            deprecated_since=versionutils.deprecated.XENA)
    ),
    policy.DocumentedRuleDefault(
        name="add_task",
        check_str=base.DEFAULT,
        scope_types=['project'],
        description='Create an image task.\n' + TASK_DESCRIPTION,
        operations=[
            {'path': '/v2/tasks',
             'method': 'POST'}
        ],
        deprecated_rule=policy.DeprecatedRule(
            name="add_task", check_str=base.DEFAULT,
            deprecated_reason=DEPRECATION_REASON,
            deprecated_since=versionutils.deprecated.XENA)
    ),
    policy.DocumentedRuleDefault(
        name="modify_task",
        check_str=base.DEFAULT,
        scope_types=['project'],
        description="This policy is not used.",
        operations=[
            {'path': '/v2/tasks/{task_id}',
             'method': 'DELETE'}
        ],
        deprecated_for_removal=True,
        deprecated_reason=MODIFY_TASK_DEPRECATION,
        deprecated_since=versionutils.deprecated.WALLABY,
    ),
    policy.DocumentedRuleDefault(
        name="tasks_api_access",
        check_str=base.ADMIN,
        scope_types=['project'],
        description=TASK_ACCESS_DESCRIPTION,
        operations=[
            {'path': '/v2/tasks/{task_id}',
             'method': 'GET'},
            {'path': '/v2/tasks',
             'method': 'GET'},
            {'path': '/v2/tasks',
             'method': 'POST'},
            {'path': '/v2/tasks/{task_id}',
             'method': 'DELETE'}
        ],
    )
]


def list_rules():
    return task_policies
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8823059 glance-29.0.0/glance/quota/0000775000175000017500000000000000000000000015440 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/quota/__init__.py0000664000175000017500000003514100000000000017555 0ustar00zuulzuul00000000000000
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import glance_store as store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import glance.api.common import glance.common.exception as exception from glance.common import utils import glance.domain import glance.domain.proxy from glance.i18n import _, _LI LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('image_member_quota', 'glance.common.config') CONF.import_opt('image_property_quota', 'glance.common.config') CONF.import_opt('image_tag_quota', 'glance.common.config') def _enforce_image_tag_quota(tags): if CONF.image_tag_quota < 0: # If value is negative, allow unlimited number of tags return if not tags: return if len(tags) > CONF.image_tag_quota: raise exception.ImageTagLimitExceeded(attempted=len(tags), maximum=CONF.image_tag_quota) def _calc_required_size(context, image, locations): required_size = None if image.size: required_size = image.size * len(locations) else: for location in locations: size_from_backend = None try: if CONF.enabled_backends: size_from_backend = store.get_size_from_uri_and_backend( location['url'], location['metadata'].get('store'), context=context) else: size_from_backend = store.get_size_from_backend( location['url'], context=context) except (store.UnknownScheme, store.NotFound): pass except store.BadStoreUri: raise exception.BadStoreUri if size_from_backend: required_size = size_from_backend * len(locations) break return required_size def _enforce_image_location_quota(image, locations, is_setter=False): if CONF.image_location_quota < 0: # If value is negative, allow unlimited number of locations return attempted = len(image.locations) + len(locations) attempted = attempted if not is_setter else len(locations) maximum = CONF.image_location_quota if attempted > maximum: raise exception.ImageLocationLimitExceeded(attempted=attempted, maximum=maximum) class ImageRepoProxy(glance.domain.proxy.Repo): def __init__(self, image_repo, context, db_api, store_utils): self.image_repo = image_repo self.db_api = db_api proxy_kwargs = {'context': context, 'db_api': db_api, 'store_utils': store_utils} super(ImageRepoProxy, self).__init__(image_repo, item_proxy_class=ImageProxy, item_proxy_kwargs=proxy_kwargs) def _enforce_image_property_quota(self, properties): if CONF.image_property_quota < 0: # If value is negative, allow unlimited number of properties return attempted = len( [x for x in properties.keys() if not x.startswith(glance.api.common.GLANCE_RESERVED_NS)]) maximum = CONF.image_property_quota if attempted > maximum: kwargs = {'attempted': attempted, 'maximum': maximum} exc = exception.ImagePropertyLimitExceeded(**kwargs) LOG.debug(encodeutils.exception_to_unicode(exc)) raise exc def save(self, image, from_state=None): if image.added_new_properties(): self._enforce_image_property_quota(image.extra_properties) return super(ImageRepoProxy, self).save(image, from_state=from_state) def add(self, image): self._enforce_image_property_quota(image.extra_properties) return super(ImageRepoProxy, self).add(image) class 
ImageFactoryProxy(glance.domain.proxy.ImageFactory): def __init__(self, factory, context, db_api, store_utils): proxy_kwargs = {'context': context, 'db_api': db_api, 'store_utils': store_utils} super(ImageFactoryProxy, self).__init__(factory, proxy_class=ImageProxy, proxy_kwargs=proxy_kwargs) def new_image(self, **kwargs): tags = kwargs.pop('tags', set([])) _enforce_image_tag_quota(tags) return super(ImageFactoryProxy, self).new_image(tags=tags, **kwargs) class QuotaImageTagsProxy(object): def __init__(self, orig_set): if orig_set is None: orig_set = set([]) self.tags = orig_set def add(self, item): self.tags.add(item) _enforce_image_tag_quota(self.tags) def __cast__(self, *args, **kwargs): return self.tags.__cast__(*args, **kwargs) def __contains__(self, *args, **kwargs): return self.tags.__contains__(*args, **kwargs) def __eq__(self, other): return self.tags == other def __ne__(self, other): return not self.__eq__(other) def __iter__(self, *args, **kwargs): return self.tags.__iter__(*args, **kwargs) def __len__(self, *args, **kwargs): return self.tags.__len__(*args, **kwargs) def __getattr__(self, name): # Protect against deepcopy, which calls getattr. __getattr__ # is only called when an attribute is not "normal", so when # self.tags is called, this is not. if name == 'tags': try: return self.__getattribute__('tags') except AttributeError: return None return getattr(self.tags, name) class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): def __init__(self, member_factory, context, db_api, store_utils): self.db_api = db_api self.context = context proxy_kwargs = {'context': context, 'db_api': db_api, 'store_utils': store_utils} super(ImageMemberFactoryProxy, self).__init__( member_factory, proxy_class=ImageMemberProxy, proxy_kwargs=proxy_kwargs) def _enforce_image_member_quota(self, image): if CONF.image_member_quota < 0: # If value is negative, allow unlimited number of members return current_member_count = self.db_api.image_member_count(self.context, image.image_id) attempted = current_member_count + 1 maximum = CONF.image_member_quota if attempted > maximum: raise exception.ImageMemberLimitExceeded(attempted=attempted, maximum=maximum) def new_image_member(self, image, member_id): self._enforce_image_member_quota(image) return super(ImageMemberFactoryProxy, self).new_image_member(image, member_id) class QuotaImageLocationsProxy(object): def __init__(self, image, context, db_api): self.image = image self.context = context self.db_api = db_api self.locations = image.locations def __cast__(self, *args, **kwargs): return self.locations.__cast__(*args, **kwargs) def __contains__(self, *args, **kwargs): return self.locations.__contains__(*args, **kwargs) def __delitem__(self, *args, **kwargs): return self.locations.__delitem__(*args, **kwargs) def __delslice__(self, *args, **kwargs): return self.locations.__delslice__(*args, **kwargs) def __eq__(self, other): return self.locations == other def __ne__(self, other): return not self.__eq__(other) def __getitem__(self, *args, **kwargs): return self.locations.__getitem__(*args, **kwargs) def __iadd__(self, other): if not hasattr(other, '__iter__'): raise TypeError() self._check_user_storage_quota(other) return self.locations.__iadd__(other) def __iter__(self, *args, **kwargs): return self.locations.__iter__(*args, **kwargs) def __len__(self, *args, **kwargs): return self.locations.__len__(*args, **kwargs) def __setitem__(self, key, value): return self.locations.__setitem__(key, value) def count(self, *args, **kwargs): return 
self.locations.count(*args, **kwargs) def index(self, *args, **kwargs): return self.locations.index(*args, **kwargs) def pop(self, *args, **kwargs): return self.locations.pop(*args, **kwargs) def remove(self, *args, **kwargs): return self.locations.remove(*args, **kwargs) def reverse(self, *args, **kwargs): return self.locations.reverse(*args, **kwargs) def _check_user_storage_quota(self, locations): required_size = _calc_required_size(self.context, self.image, locations) glance.api.common.check_quota(self.context, required_size, self.db_api) _enforce_image_location_quota(self.image, locations) def __copy__(self): return type(self)(self.image, self.context, self.db_api) def __deepcopy__(self, memo): # NOTE(zhiyan): Only copy location entries, others can be reused. self.image.locations = copy.deepcopy(self.locations, memo) return type(self)(self.image, self.context, self.db_api) def append(self, object): self._check_user_storage_quota([object]) return self.locations.append(object) def insert(self, index, object): self._check_user_storage_quota([object]) return self.locations.insert(index, object) def extend(self, iter): self._check_user_storage_quota(iter) return self.locations.extend(iter) class ImageProxy(glance.domain.proxy.Image): def __init__(self, image, context, db_api, store_utils): self.image = image self.context = context self.db_api = db_api self.store_utils = store_utils super(ImageProxy, self).__init__(image) self.orig_props = set(image.extra_properties.keys()) def set_data(self, data, size=None, backend=None, set_active=True): remaining = glance.api.common.check_quota( self.context, size, self.db_api, image_id=self.image.image_id) if remaining is not None: # NOTE(jbresnah) we are trying to enforce a quota, put a limit # reader on the data data = utils.LimitingReader( data, remaining, exception_class=exception.StorageQuotaFull) self.image.set_data(data, size=size, backend=backend, set_active=set_active) # NOTE(jbresnah) If two uploads happen at the same time and neither # properly sets the size attribute[1] then there is a race condition # that will allow for the quota to be broken[2]. Thus we must recheck # the quota after the upload and thus after we know the size. # # Also, when an upload doesn't set the size properly then the call to # check_quota above returns None and so utils.LimitingReader is not # used above. Hence the store (e.g. filesystem store) may have to # download the entire file before knowing the actual file size. Here # also we need to check for the quota again after the image has been # downloaded to the store. # # [1] For e.g. when using chunked transfers the 'Content-Length' # header is not set. # [2] For e.g.: # - Upload 1 does not exceed quota but upload 2 exceeds quota. # Both uploads are to different locations # - Upload 2 completes before upload 1 and writes image.size. # - Immediately, upload 1 completes and (over)writes image.size # with the smaller size. # - Now, to glance, image has not exceeded quota but, in # reality, the quota has been exceeded. 
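# The recheck below closes that window: once the store has consumed
# the data, image.size reflects the true upload size, so the quota is
# verified a second time and any over-quota data that was just written
# is deleted before StorageQuotaFull is re-raised to the caller.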
try: glance.api.common.check_quota( self.context, self.image.size, self.db_api, image_id=self.image.image_id) except exception.StorageQuotaFull: with excutils.save_and_reraise_exception(): LOG.info(_LI('Cleaning up %s after exceeding the quota.'), self.image.image_id) self.store_utils.safe_delete_from_backend( self.context, self.image.image_id, self.image.locations[0]) @property def tags(self): return QuotaImageTagsProxy(self.image.tags) @tags.setter def tags(self, value): _enforce_image_tag_quota(value) self.image.tags = value @property def locations(self): return QuotaImageLocationsProxy(self.image, self.context, self.db_api) @locations.setter def locations(self, value): _enforce_image_location_quota(self.image, value, is_setter=True) if not isinstance(value, (list, QuotaImageLocationsProxy)): raise exception.Invalid(_('Invalid locations: %s') % value) required_size = _calc_required_size(self.context, self.image, value) glance.api.common.check_quota( self.context, required_size, self.db_api, image_id=self.image.image_id) self.image.locations = value def added_new_properties(self): current_props = set(self.image.extra_properties.keys()) return bool(current_props.difference(self.orig_props)) class ImageMemberProxy(glance.domain.proxy.ImageMember): def __init__(self, image_member, context, db_api, store_utils): self.image_member = image_member self.context = context self.db_api = db_api self.store_utils = store_utils super(ImageMemberProxy, self).__init__(image_member) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/quota/keystone.py0000664000175000017500000001451000000000000017654 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_limit import exception as ol_exc from oslo_limit import limit from oslo_log import log as logging from oslo_utils import units from glance.common import exception from glance.db.sqlalchemy import api as db from glance.i18n import _LE CONF = cfg.CONF CONF.import_opt('use_keystone_limits', 'glance.common.config') LOG = logging.getLogger(__name__) limit.opts.register_opts(CONF) QUOTA_IMAGE_SIZE_TOTAL = 'image_size_total' QUOTA_IMAGE_STAGING_TOTAL = 'image_stage_total' QUOTA_IMAGE_COUNT_TOTAL = 'image_count_total' QUOTA_IMAGE_COUNT_UPLOADING = 'image_count_uploading' def _enforce_some(context, project_id, quota_value_fns, deltas): """Helper method to enforce a set of quota values. :param context: The RequestContext :param project_id: The project_id of the tenant being checked :param quota_value_fns: A mapping of quota names to functions that will be called with no arguments to return the numerical value representing current usage. :param deltas: A mapping of quota names to the amount of resource being requested for each (to be added to the current usage before determining if over-quota). :raises: exception.LimitExceeded if the current usage is over the defined limit.
:returns: None if the tenant is not currently over their quota. """ if not CONF.use_keystone_limits: return def callback(project_id, resource_names): return {name: quota_value_fns[name]() for name in resource_names} enforcer = limit.Enforcer(callback) try: enforcer.enforce(project_id, {quota_name: deltas.get(quota_name, 0) for quota_name in quota_value_fns}) except ol_exc.ProjectOverLimit as e: raise exception.LimitExceeded(body=str(e)) except ol_exc.SessionInitError as e: LOG.error(_LE('Failed to initialize oslo_limit, likely due to ' 'incorrect or insufficient configuration: %(err)s'), {'err': str(e)}) # We could just raise LimitExceeded here, but a 500 is # appropriate for incorrect server-side configuration, so we # re-raise here after the above error message to make sure we # are noticed. raise def _enforce_one(context, project_id, quota_name, get_value_fn, delta=0): """Helper method to enforce a single named quota value. :param context: The RequestContext :param project_id: The project_id of the tenant being checked :param quota_name: One of the quota names defined above :param get_value_fn: A function that will be called with no arguments to return the numerical value representing current usage. :param delta: The amount of resource being requested (to be added to the current usage before determining if over-quota). :raises: exception.LimitExceeded if the current usage is over the defined limit. :returns: None if the tenant is not currently over their quota. """ return _enforce_some(context, project_id, {quota_name: get_value_fn}, {quota_name: delta}) def enforce_image_size_total(context, project_id, delta=0): """Enforce the image_size_total quota. This enforces the total image size quota for the supplied project_id. """ _enforce_one( context, project_id, QUOTA_IMAGE_SIZE_TOTAL, lambda: db.user_get_storage_usage(context, project_id) // units.Mi, delta=delta) def enforce_image_staging_total(context, project_id, delta=0): """Enforce the image_stage_total quota. This enforces the total size of all images stored in staging areas for the supplied project_id. """ _enforce_one( context, project_id, QUOTA_IMAGE_STAGING_TOTAL, lambda: db.user_get_staging_usage(context, project_id) // units.Mi, delta=delta) def enforce_image_count_total(context, project_id): """Enforce the image_count_total quota. This enforces the total count of non-deleted images owned by the supplied project_id. """ _enforce_one( context, project_id, QUOTA_IMAGE_COUNT_TOTAL, lambda: db.user_get_image_count(context, project_id), delta=1) def enforce_image_count_uploading(context, project_id): """Enforce the image_count_uploading quota. This enforces the total count of images in any state of upload by the supplied project_id. Note that the delta is fixed at zero: the image being checked is already in an upload state and therefore already counts against this quota (e.g. a stage operation on an existing queued image).
""" _enforce_one( context, project_id, QUOTA_IMAGE_COUNT_UPLOADING, lambda: db.user_get_uploading_count(context, project_id), delta=0) def get_usage(context, project_id=None): if not CONF.use_keystone_limits: return {} if not project_id: project_id = context.project_id usages = { QUOTA_IMAGE_SIZE_TOTAL: lambda: db.user_get_storage_usage( context, project_id) // units.Mi, QUOTA_IMAGE_STAGING_TOTAL: lambda: db.user_get_staging_usage( context, project_id) // units.Mi, QUOTA_IMAGE_COUNT_TOTAL: lambda: db.user_get_image_count( context, project_id), QUOTA_IMAGE_COUNT_UPLOADING: lambda: db.user_get_uploading_count( context, project_id), } def callback(project_id, resource_names): return {name: usages[name]() for name in resource_names} enforcer = limit.Enforcer(callback) return enforcer.calculate_usage(project_id, list(usages.keys())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/schema.py0000664000175000017500000001756000000000000016132 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonschema from oslo_utils import encodeutils from glance.common import exception from glance.i18n import _ class Schema(object): def __init__(self, name, properties=None, links=None, required=None, definitions=None): self.name = name if properties is None: properties = {} self.properties = properties self.links = links self.required = required self.definitions = definitions def validate(self, obj): try: jsonschema.validate(obj, self.raw()) except jsonschema.ValidationError as e: reason = encodeutils.exception_to_unicode(e) raise exception.InvalidObject(schema=self.name, reason=reason) def filter(self, obj): filtered = {} for key, value in obj.items(): if self._filter_func(self.properties, key): filtered[key] = value # NOTE(flaper87): This exists to allow for v1, null properties, # to be used with the V2 API. During Kilo, it was allowed for the # later to return None values without considering that V1 allowed # for custom properties to be None, which is something V2 doesn't # allow for. This small hack here will set V1 custom `None` pro- # perties to an empty string so that they will be updated along # with the image (if an update happens). # # We could skip the properties that are `None` but that would bring # back the behavior we moved away from. Note that we can't consider # doing a schema migration because we don't know which properties # are "custom" and which came from `schema-image` if those custom # properties were created with v1. 
if key not in self.properties and value is None: filtered[key] = '' return filtered @staticmethod def _filter_func(properties, key): return key in properties def merge_properties(self, properties): # Ensure custom props aren't attempting to override base props original_keys = set(self.properties.keys()) new_keys = set(properties.keys()) intersecting_keys = original_keys.intersection(new_keys) conflicting_keys = [k for k in intersecting_keys if self.properties[k] != properties[k]] if conflicting_keys: props = ', '.join(conflicting_keys) reason = _("custom properties (%(props)s) conflict " "with base properties") raise exception.SchemaLoadError(reason=reason % {'props': props}) self.properties.update(properties) def raw(self): raw = { 'name': self.name, 'properties': self.properties, 'additionalProperties': False, } if self.definitions: raw['definitions'] = self.definitions if self.required: raw['required'] = self.required if self.links: raw['links'] = self.links return raw def minimal(self): minimal = { 'name': self.name, 'properties': self.properties } if self.definitions: minimal['definitions'] = self.definitions if self.required: minimal['required'] = self.required return minimal class PermissiveSchema(Schema): @staticmethod def _filter_func(properties, key): return True def raw(self): raw = super(PermissiveSchema, self).raw() raw['additionalProperties'] = {'type': 'string'} return raw def minimal(self): minimal = super(PermissiveSchema, self).raw() return minimal class CollectionSchema(object): def __init__(self, name, item_schema): self.name = name self.item_schema = item_schema def raw(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None raw = { 'name': self.name, 'properties': { self.name: { 'type': 'array', 'items': self.item_schema.raw(), }, 'first': {'type': 'string'}, 'next': {'type': 'string'}, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'first', 'href': '{first}'}, {'rel': 'next', 'href': '{next}'}, {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: raw['definitions'] = definitions self.item_schema.definitions = definitions return raw def minimal(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None minimal = { 'name': self.name, 'properties': { self.name: { 'type': 'array', 'items': self.item_schema.minimal(), }, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: minimal['definitions'] = definitions self.item_schema.definitions = definitions return minimal class DictCollectionSchema(Schema): def __init__(self, name, item_schema): self.name = name self.item_schema = item_schema def raw(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None raw = { 'name': self.name, 'properties': { self.name: { 'type': 'object', 'additionalProperties': self.item_schema.raw(), }, 'first': {'type': 'string'}, 'next': {'type': 'string'}, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'first', 'href': '{first}'}, {'rel': 'next', 'href': '{next}'}, {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: raw['definitions'] = definitions self.item_schema.definitions = definitions return raw def minimal(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None minimal = { 'name': 
self.name, 'properties': { self.name: { 'type': 'object', 'additionalProperties': self.item_schema.minimal(), }, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: minimal['definitions'] = definitions self.item_schema.definitions = definitions return minimal ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/scrubber.py0000664000175000017500000004052100000000000016472 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import calendar import time import eventlet from glance_store import exceptions as store_exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import encodeutils from glance.common import crypt from glance.common import exception from glance.common import timeutils from glance import context import glance.db as db_api from glance.i18n import _, _LC, _LE, _LI, _LW LOG = logging.getLogger(__name__) DEPRECATED_SUBSTRING = ('is scheduled to be removed during the 2024.2 ' '(Dalmatian) development cycle.') DEPRECATED_SCRUBBER_MSG = ('The glance scrubber has been deprecated and ' + DEPRECATED_SUBSTRING) DEPRECATED_OPTS_MSG = ('The entire glance scrubber, including this option, ' + DEPRECATED_SUBSTRING) scrubber_opts = [ cfg.IntOpt('scrub_time', default=0, min=0, deprecated_for_removal=True, deprecated_since='2024.1 (Caracal)', deprecated_reason=DEPRECATED_OPTS_MSG, help=_(""" The amount of time, in seconds, to delay image scrubbing. When delayed delete is turned on, an image is put into ``pending_delete`` state upon deletion until the scrubber deletes its image data. Typically, soon after the image is put into ``pending_delete`` state, it is available for scrubbing. However, scrubbing can be delayed until a later point using this configuration option. This option denotes the time period an image spends in ``pending_delete`` state before it is available for scrubbing. It is important to realize that this has storage implications. The larger the ``scrub_time``, the longer the time to reclaim backend storage from deleted images. Possible values: * Any non-negative integer Related options: * ``delayed_delete`` """)), cfg.IntOpt('scrub_pool_size', default=1, min=1, deprecated_for_removal=True, deprecated_since='2024.1 (Caracal)', deprecated_reason=DEPRECATED_OPTS_MSG, help=_(""" The size of thread pool to be used for scrubbing images. When there are a large number of images to scrub, it is beneficial to scrub images in parallel so that the scrub queue stays in control and the backend storage is reclaimed in a timely fashion. This configuration option denotes the maximum number of images to be scrubbed in parallel. The default value is one, which signifies serial scrubbing. Any value above one indicates parallel scrubbing. 
Possible values: * Any non-zero positive integer Related options: * ``delayed_delete`` """)), cfg.BoolOpt('delayed_delete', default=False, deprecated_for_removal=True, deprecated_since='2024.1 (Caracal)', deprecated_reason=DEPRECATED_OPTS_MSG, help=_(""" Turn on/off delayed delete. Typically when an image is deleted, the ``glance-api`` service puts the image into ``deleted`` state and deletes its data at the same time. Delayed delete is a feature in Glance that delays the actual deletion of image data until a later point in time (as determined by the configuration option ``scrub_time``). When delayed delete is turned on, the ``glance-api`` service puts the image into ``pending_delete`` state upon deletion and leaves the image data in the storage backend for the image scrubber to delete at a later time. The image scrubber will move the image into ``deleted`` state upon successful deletion of image data. NOTE: When delayed delete is turned on, image scrubber MUST be running as a periodic task to prevent the backend storage from filling up with undesired usage. Possible values: * True * False Related options: * ``scrub_time`` * ``wakeup_time`` * ``scrub_pool_size`` """)), ] scrubber_cmd_opts = [ cfg.IntOpt('wakeup_time', default=300, min=0, deprecated_for_removal=True, deprecated_since='2024.1 (Caracal)', deprecated_reason=DEPRECATED_OPTS_MSG, help=_(""" Time interval, in seconds, between scrubber runs in daemon mode. Scrubber can be run either as a cron job or daemon. When run as a daemon, this configuration time specifies the time period between two runs. When the scrubber wakes up, it fetches and scrubs all ``pending_delete`` images that are available for scrubbing after taking ``scrub_time`` into consideration. If the wakeup time is set to a large number, there may be a large number of images to be scrubbed for each run. Also, this impacts how quickly the backend storage is reclaimed. Possible values: * Any non-negative integer Related options: * ``daemon`` * ``delayed_delete`` """)) ] scrubber_cmd_cli_opts = [ cfg.BoolOpt('daemon', short='D', default=False, deprecated_for_removal=True, deprecated_since='2024.1 (Caracal)', deprecated_reason=DEPRECATED_OPTS_MSG, help=_(""" Run scrubber as a daemon. This boolean configuration option indicates whether scrubber should run as a long-running process that wakes up at regular intervals to scrub images. The wake up interval can be specified using the configuration option ``wakeup_time``. If this configuration option is set to ``False``, which is the default value, scrubber runs once to scrub images and exits. In this case, if the operator wishes to implement continuous scrubbing of images, scrubber needs to be scheduled as a cron job. Possible values: * True * False Related options: * ``wakeup_time`` """)), cfg.StrOpt('restore', metavar='', deprecated_for_removal=True, deprecated_since='2024.1 (Caracal)', deprecated_reason=DEPRECATED_OPTS_MSG, help=_(""" Restore the image status from 'pending_delete' to 'active'. This option is used by administrator to reset the image's status from 'pending_delete' to 'active' when the image is deleted by mistake and 'pending delete' feature is enabled in Glance. Please make sure the glance-scrubber daemon is stopped before restoring the image to avoid image data inconsistency. 
Possible values: * image's uuid """)) ] CONF = cfg.CONF CONF.register_opts(scrubber_opts) CONF.import_opt('metadata_encryption_key', 'glance.common.config') REASONABLE_DB_PAGE_SIZE = 1000 class ScrubDBQueue(object): """Database-based image scrub queue class.""" def __init__(self): self.scrub_time = CONF.scrub_time self.metadata_encryption_key = CONF.metadata_encryption_key self.admin_context = context.get_admin_context(show_deleted=True) def add_location(self, image_id, location): """Add an image location to the scrub queue. :param image_id: The opaque image identifier :param location: The opaque image location :returns: A boolean value indicating success """ loc_id = location.get('id') if loc_id: db_api.get_api().image_location_delete(self.admin_context, image_id, loc_id, 'pending_delete') return True else: return False def _get_images_page(self, marker): filters = {'deleted': True, 'status': 'pending_delete'} return db_api.get_api().image_get_all(self.admin_context, filters=filters, marker=marker, limit=REASONABLE_DB_PAGE_SIZE) def _get_all_images(self): """Generator to fetch all appropriate images, paging as needed.""" marker = None while True: images = self._get_images_page(marker) if len(images) == 0: break marker = images[-1]['id'] for image in images: yield image def get_all_locations(self): """Return a list of (image id, location id, uri) tuples from the scrub queue. :returns: a list of (image id, location id, uri) tuples from the scrub queue """ ret = [] for image in self._get_all_images(): deleted_at = image.get('deleted_at') if not deleted_at: continue # NOTE: Strip off microseconds, which may occur after the last '.' # or ','. Example: 2012-07-07T19:14:34.974216 deleted_at = timeutils.isotime(deleted_at) date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0] delete_time = calendar.timegm(time.strptime(date_str, "%Y-%m-%dT%H:%M:%SZ")) if delete_time + self.scrub_time > time.time(): continue for loc in image['locations']: if loc['status'] != 'pending_delete': continue if self.metadata_encryption_key: uri = crypt.urlsafe_decrypt(self.metadata_encryption_key, loc['url']) else: uri = loc['url'] # If multi-store is enabled then we need to pass the backend # to delete the image. backend = loc['metadata'].get('store') if CONF.enabled_backends: ret.append((image['id'], loc['id'], uri, backend)) else: ret.append((image['id'], loc['id'], uri)) return ret def has_image(self, image_id): """Returns whether the queue contains an image or not.
:param image_id: The opaque image identifier :returns: a boolean indicating whether the image is in the queue """ try: image = db_api.get_api().image_get(self.admin_context, image_id) return image['status'] == 'pending_delete' except exception.NotFound: return False _db_queue = None def get_scrub_queue(): global _db_queue if not _db_queue: _db_queue = ScrubDBQueue() return _db_queue class Daemon(object): def __init__(self, wakeup_time=300, threads=100): versionutils.report_deprecated_feature(LOG, DEPRECATED_SCRUBBER_MSG) LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s " "threads=%(threads)s"), {'wakeup_time': wakeup_time, 'threads': threads}) self.wakeup_time = wakeup_time self.event = eventlet.event.Event() # This pool is used for periodic instantiation of scrubber self.daemon_pool = eventlet.greenpool.GreenPool(threads) def start(self, application): self._run(application) def wait(self): try: self.event.wait() except KeyboardInterrupt: msg = _LI("Daemon Shutdown on KeyboardInterrupt") LOG.info(msg) def _run(self, application): LOG.debug("Running application") self.daemon_pool.spawn_n(application.run, self.event) eventlet.spawn_after(self.wakeup_time, self._run, application) LOG.debug("Next run scheduled in %s seconds", self.wakeup_time) class Scrubber(object): def __init__(self, store_api): versionutils.report_deprecated_feature(LOG, DEPRECATED_SCRUBBER_MSG) LOG.info(_LI("Initializing scrubber")) self.store_api = store_api self.admin_context = context.get_admin_context(show_deleted=True) self.db_queue = get_scrub_queue() self.pool = eventlet.greenpool.GreenPool(CONF.scrub_pool_size) def _get_delete_jobs(self): try: records = self.db_queue.get_all_locations() except Exception as err: # NOTE(dharinic): In daemon mode, spawn_n will log the exception # raised; otherwise the process will exit with status 1. msg = (_LC("Cannot get scrub jobs from queue: %s") % encodeutils.exception_to_unicode(err)) LOG.critical(msg) raise exception.FailedToGetScrubberJobs() delete_jobs = {} if CONF.enabled_backends: for image_id, loc_id, loc_uri, backend in records: if image_id not in delete_jobs: delete_jobs[image_id] = [] delete_jobs[image_id].append((image_id, loc_id, loc_uri, backend)) else: for image_id, loc_id, loc_uri in records: if image_id not in delete_jobs: delete_jobs[image_id] = [] delete_jobs[image_id].append((image_id, loc_id, loc_uri)) return delete_jobs def run(self, event=None): delete_jobs = self._get_delete_jobs() if delete_jobs: list(self.pool.starmap(self._scrub_image, delete_jobs.items())) def _scrub_image(self, image_id, delete_jobs): if len(delete_jobs) == 0: return LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations."), {'id': image_id, 'count': len(delete_jobs)}) success = True if CONF.enabled_backends: for img_id, loc_id, uri, backend in delete_jobs: try: self._delete_image_location_from_backend(img_id, loc_id, uri, backend=backend) except Exception: success = False else: for img_id, loc_id, uri in delete_jobs: try: self._delete_image_location_from_backend(img_id, loc_id, uri) except Exception: success = False if success: image = db_api.get_api().image_get(self.admin_context, image_id) if image['status'] == 'pending_delete': db_api.get_api().image_update(self.admin_context, image_id, {'status': 'deleted'}) LOG.info(_LI("Image %s has been scrubbed successfully"), image_id) else: LOG.warning(_LW("One or more image locations couldn't be scrubbed " "from backend. 
Leaving image '%s' in " "'pending_delete' status"), image_id) def _delete_image_location_from_backend(self, image_id, loc_id, uri, backend=None): try: LOG.debug("Scrubbing image %s from a location.", image_id) try: if CONF.enabled_backends: self.store_api.delete(uri, backend, self.admin_context) else: self.store_api.delete_from_backend(uri, self.admin_context) except store_exceptions.NotFound: LOG.info(_LI("Image location for image '%s' not found in " "backend; Marking image location deleted in " "db."), image_id) if loc_id != '-': db_api.get_api().image_location_delete(self.admin_context, image_id, int(loc_id), 'deleted') LOG.info(_LI("Image %s is scrubbed from a location."), image_id) except Exception as e: LOG.error(_LE("Unable to scrub image %(id)s from a location. " "Reason: %(exc)s "), {'id': image_id, 'exc': encodeutils.exception_to_unicode(e)}) raise def revert_image_status(self, image_id): db_api.get_api().image_restore(self.admin_context, image_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/sqlite_migration.py0000664000175000017500000001424600000000000020242 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
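# A minimal, self-contained sketch (not part of Glance itself) of the
# timestamp conversion Migrate.migrate() performs below: the SQLite
# cache stores epoch seconds, while the centralized database expects
# ISO 8601 strings. The helper name and the example row values are
# hypothetical.
import datetime


def _row_timestamps_to_iso(row):
    """Convert a cached_images row's epoch fields to ISO 8601 strings."""
    return {
        # utcfromtimestamp() mirrors the conversion the migration below
        # applies to last_accessed and last_modified.
        'last_accessed': datetime.datetime.utcfromtimestamp(
            row['last_accessed']).isoformat(),
        'last_modified': datetime.datetime.utcfromtimestamp(
            row['last_modified']).isoformat(),
    }

# Example: _row_timestamps_to_iso({'last_accessed': 1727867955.0,
#                                  'last_modified': 1727867955.0})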
import datetime import os from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from glance.common import exception from glance import context import glance.db from glance.i18n import _ from glance.image_cache.drivers import common LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt("image_cache_sqlite_db", "glance.image_cache.drivers.sqlite") def can_migrate_to_central_db(): # Return immediately if cache is disabled if not (CONF.paste_deploy.flavor and 'cache' in CONF.paste_deploy.flavor): return False is_centralized_db_driver = CONF.image_cache_driver == "centralized_db" # Check worker_self_reference_url is set if cache is enabled and # cache driver is centralized_db if is_centralized_db_driver and not CONF.worker_self_reference_url: msg = _("'worker_self_reference_url' needs to be set " "if `centralized_db` is defined as cache driver " "for image_cache_driver config option.") raise RuntimeError(msg) return is_centralized_db_driver def migrate_if_required(): if can_migrate_to_central_db(): sqlite_db_file = get_db_path() if sqlite_db_file: LOG.info("Initiating migration process from SQLite to Centralized " "database") migrate = Migrate(sqlite_db_file, glance.db.get_api()) migrate.migrate() def get_db_path(): """Return the local path to sqlite database.""" db = CONF.image_cache_sqlite_db base_dir = CONF.image_cache_dir db_file = os.path.join(base_dir, db) if not os.path.exists(db_file): LOG.debug('SQLite caching database not located, skipping migration') return return db_file class Migrate: def __init__(self, db, db_api): self.db = db self.db_api = db_api self.context = context.get_admin_context() self.node_reference = CONF.worker_self_reference_url @lockutils.synchronized('sqlite_centralized_migrate', external=True) def migrate(self): LOG.debug("Adding local node reference %(node)s in centralized db", {'node': self.node_reference}) to_be_deleted = [] try: self.db_api.node_reference_create( self.context, self.node_reference) except exception.Duplicate: LOG.debug("Node reference %(node)s is already recorded, " "ignoring it", {'node': self.node_reference}) LOG.debug("Connecting to SQLite db %s", self.db) with common.get_db(self.db) as sqlite_db: cur = sqlite_db.execute("""SELECT image_id, hits, last_accessed, last_modified, size FROM cached_images ORDER BY image_id""") cur.row_factory = common.dict_factory for r in cur: # NOTE(abhishekk): Check if cache record is already present for # current node in centralized db if not self.db_api.is_image_cached_for_node( self.context, self.node_reference, r['image_id']): LOG.debug("Migrating image %s from SQLite to Centralized " "db.", r['image_id']) # NOTE(abhishekk): Converting dates to be compatible with # centralized db last_accessed = datetime.datetime.utcfromtimestamp( r['last_accessed']).isoformat() last_modified = datetime.datetime.utcfromtimestamp( r['last_modified']).isoformat() # insert into centralized database self.db_api.insert_cache_details( self.context, self.node_reference, r['image_id'], r['size'], hits=r['hits'], last_modified=last_modified, last_accessed=last_accessed) # Verify entry is made in centralized db before adding # image id to list to delete later from sqlite db if self.db_api.is_image_cached_for_node( self.context, self.node_reference, r['image_id']): LOG.debug("Image %(uuid)s is migrated to centralized " "db for node %(node)s", {'uuid': r['image_id'], 'node': self.node_reference}) to_be_deleted.append(r['image_id']) else: LOG.debug('Skipping migrating image 
%(uuid)s from SQLite ' 'to Centralized db for node %(node)s as it is ' 'present in Centralized db.', {'uuid': r['image_id'], 'node': self.node_reference}) # Delete the images from sqlite db which are migrated to # centralized db for image_id in to_be_deleted: LOG.debug("Deleting image %s from SQLite db", image_id) sqlite_db.execute("""DELETE FROM cached_images WHERE image_id = ?""", (image_id,)) sqlite_db.commit() if to_be_deleted: LOG.debug("Migrated %d records from SQLite db to Centralized " "db", len(to_be_deleted)) else: # NOTE(abhishekk): Safe to assume, no records present in SQLite db LOG.debug("No cache records found, skipping migration process.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8823059 glance-29.0.0/glance/tests/0000775000175000017500000000000000000000000015451 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/__init__.py0000664000175000017500000000350500000000000017565 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import builtins import os import eventlet # NOTE(jokke): As per the eventlet commit # b756447bab51046dfc6f1e0e299cc997ab343701 there's circular import happening # which can be solved making sure the hubs are properly and fully imported # before calling monkey_patch(). This is solved in eventlet 0.22.0 but we # need to address it before that is widely used around. eventlet.hubs.get_hub() if os.name == 'nt': # eventlet monkey patching the os module causes subprocess.Popen to fail # on Windows when using pipes due to missing non-blocking IO support. 
eventlet.patcher.monkey_patch(os=False) else: eventlet.patcher.monkey_patch() import glance.async_ # NOTE(danms): Default to eventlet threading for tests glance.async_.set_threadpool_model('eventlet') # See http://code.google.com/p/python-nose/issues/detail?id=373 # The code below enables tests to work with i18n _() blocks setattr(builtins, '_', lambda x: x) # Set up logging to output debugging import logging logger = logging.getLogger() hdlr = logging.FileHandler('run_tests.log', 'w') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8823059 glance-29.0.0/glance/tests/etc/0000775000175000017500000000000000000000000016224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/etc/glance-swift.conf0000664000175000017500000000100400000000000021451 0ustar00zuulzuul00000000000000[ref1] user = tenant:user1 key = key1 auth_address = example.com [ref2] user = user2 key = key2 auth_address = http://example.com [store_2] user = tenant:user1 key = key1 auth_address= https://localhost:8080 [store_3] user= tenant:user2 key= key2 auth_address= https://localhost:8080 [store_4] user = tenant:user1 key = key1 auth_address = http://localhost:80 [store_5] user = tenant:user1 key = key1 auth_address = http://localhost [store_6] user = tenant:user1 key = key1 auth_address = https://localhost/v1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/etc/policy.yaml0000664000175000017500000000032600000000000020410 0ustar00zuulzuul00000000000000# FIXME (abhishekk): This special rule is required in unit tests # to test property protection using policies. Need to make provision # to set such rules on the fly. "glance_creator": "role:admin or role:spl_role" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/etc/property-protections-policies.conf0000664000175000017500000000163300000000000025136 0ustar00zuulzuul00000000000000[spl_creator_policy] create = glance_creator read = glance_creator update = context_is_admin delete = context_is_admin [spl_default_policy] create = context_is_admin read = default update = context_is_admin delete = context_is_admin [^x_all_permitted.*] create = @ read = @ update = @ delete = @ [^x_none_permitted.*] create = ! read = ! update = ! delete = ! [x_none_read] create = context_is_admin read = ! update = ! delete = ! [x_none_update] create = context_is_admin read = context_is_admin update = ! delete = context_is_admin [x_none_delete] create = context_is_admin read = context_is_admin update = context_is_admin delete = ! 
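# NOTE: Each section header in this file is a regular expression matched
# against the image property name; the create/read/update/delete keys
# name the policy rule that must pass ('@' always allows, '!' always
# denies).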
[x_foo_matcher] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin [x_foo_*] create = @ read = @ update = @ delete = @ [.*] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/etc/property-protections.conf0000664000175000017500000000267300000000000023336 0ustar00zuulzuul00000000000000[^x_owner_.*] create = admin,member read = admin,member update = admin,member delete = admin,member [spl_create_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin [spl_read_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin [spl_read_only_prop] create = admin read = admin,spl_role update = admin delete = admin [spl_update_prop] create = admin,spl_role read = admin,spl_role update = admin,spl_role delete = admin [spl_update_only_prop] create = admin read = admin update = admin,spl_role delete = admin [spl_delete_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin,spl_role [spl_delete_empty_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin,spl_role [^x_all_permitted.*] create = @ read = @ update = @ delete = @ [^x_none_permitted.*] create = ! read = ! update = ! delete = ! [x_none_read] create = admin,member read = ! update = ! delete = ! [x_none_update] create = admin,member read = admin,member update = ! delete = admin,member [x_none_delete] create = admin,member read = admin,member update = admin,member delete = ! [x_case_insensitive] create = admin,Member read = admin,Member update = admin,Member delete = admin,Member [x_foo_matcher] create = admin read = admin update = admin delete = admin [x_foo_*] create = @ read = @ update = @ delete = @ [.*] create = admin read = admin update = admin delete = admin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/etc/schema-image.json0000664000175000017500000000000300000000000021430 0ustar00zuulzuul00000000000000{} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8863063 glance-29.0.0/glance/tests/functional/0000775000175000017500000000000000000000000017613 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/__init__.py0000664000175000017500000021074600000000000021736 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base test class for running non-stubbed tests (functional tests). The FunctionalTest class contains helper methods for starting the API server, grabbing its logs, cleaning up pidfiles, and spinning down the servers. 
""" import abc import atexit import datetime import errno import os import platform import shutil import signal import socket import subprocess import sys import tempfile from testtools import content as ttc import textwrap import time from unittest import mock import urllib.parse as urlparse import uuid import fixtures import glance_store from os_win import utilsfactory as os_win_utilsfactory from oslo_config import cfg from oslo_serialization import jsonutils import testtools import webob from glance.common import config from glance.common import utils from glance.common import wsgi from glance.db.sqlalchemy import api as db_api from glance import tests as glance_tests from glance.tests import utils as test_utils execute, get_unused_port = test_utils.execute, test_utils.get_unused_port tracecmd_osmap = {'Linux': 'strace', 'FreeBSD': 'truss'} if os.name == 'nt': SQLITE_CONN_TEMPLATE = 'sqlite:///%s/tests.sqlite' else: SQLITE_CONN_TEMPLATE = 'sqlite:////%s/tests.sqlite' CONF = cfg.CONF import glance.async_ # NOTE(danms): Default to eventlet threading for tests try: glance.async_.set_threadpool_model('eventlet') except RuntimeError: pass class BaseServer(metaclass=abc.ABCMeta): """ Class used to easily manage starting and stopping a server during functional test runs. """ def __init__(self, test_dir, port, sock=None): """ Creates a new Server object. :param test_dir: The directory where all test stuff is kept. This is passed from the FunctionalTestCase. :param port: The port to start a server up on. """ self.debug = True self.no_venv = False self.test_dir = test_dir self.bind_port = port self.conf_file_name = None self.conf_base = None self.paste_conf_base = None self.exec_env = None self.deployment_flavor = '' self.show_image_direct_url = False self.show_multiple_locations = False self.do_secure_hash = True self.http_retries = '3' self.property_protection_file = '' self.needs_database = False self.log_file = None self.sock = sock self.fork_socket = True self.process_pid = None self.server_module = None self.stop_kill = False def write_conf(self, **kwargs): """ Writes the configuration file for the server to its intended destination. Returns the name of the configuration file and the over-ridden config content (may be useful for populating error messages). """ if not self.conf_base: raise RuntimeError("Subclass did not populate config_base!") conf_override = self.__dict__.copy() if kwargs: conf_override.update(**kwargs) # A config file and paste.ini to use just for this test...we don't want # to trample on currently-running Glance servers, now do we? 
conf_dir = os.path.join(self.test_dir, 'etc') conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name) if os.path.exists(conf_filepath): os.unlink(conf_filepath) paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini") if os.path.exists(paste_conf_filepath): os.unlink(paste_conf_filepath) utils.safe_mkdirs(conf_dir) def override_conf(filepath, overridden): with open(filepath, 'w') as conf_file: conf_file.write(overridden) conf_file.flush() return conf_file.name overridden_core = self.conf_base % conf_override self.conf_file_name = override_conf(conf_filepath, overridden_core) overridden_paste = '' if self.paste_conf_base: overridden_paste = self.paste_conf_base % conf_override override_conf(paste_conf_filepath, overridden_paste) overridden = ('==Core config==\n%s\n==Paste config==\n%s' % (overridden_core, overridden_paste)) return self.conf_file_name, overridden @abc.abstractmethod def start(self, expect_exit=True, expected_exitcode=0, **kwargs): pass @abc.abstractmethod def stop(self): pass def reload(self, expect_exit=True, expected_exitcode=0, **kwargs): """ Start and stop the service to reload Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. """ self.stop() return self.start(expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) def create_database(self): """Create database if required for this server""" if self.needs_database: conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) conf_filepath = os.path.join(conf_dir, 'glance-manage.conf') with open(conf_filepath, 'w') as conf_file: conf_file.write('[database]\n') conf_file.write('connection = %s' % self.sql_connection) conf_file.flush() glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' if glance_db_env in os.environ: # use the empty db created and cached as a tempfile # instead of spending the time creating a new one db_location = os.environ[glance_db_env] shutil.copyfile(db_location, "%s/tests.sqlite" % self.test_dir) else: cmd = ('%s -m glance.cmd.manage --config-file %s db sync' % (sys.executable, conf_filepath)) execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env, expect_exit=True) # copy the clean db to a temp location so that it # can be reused for future tests (osf, db_location) = tempfile.mkstemp() os.close(osf) shutil.copyfile('%s/tests.sqlite' % self.test_dir, db_location) os.environ[glance_db_env] = db_location # cleanup the temp file when the test suite is # complete def _delete_cached_db(): try: os.remove(os.environ[glance_db_env]) except Exception: glance_tests.logger.exception( "Error cleaning up the file %s" % os.environ[glance_db_env]) atexit.register(_delete_cached_db) def dump_log(self): if not self.log_file: return "log_file not set for {name}".format(name=self.server_name) elif not os.path.exists(self.log_file): return "{log_file} for {name} did not exist".format( log_file=self.log_file, name=self.server_name) with open(self.log_file, 'r') as fptr: return fptr.read().strip() class PosixServer(BaseServer): def start(self, expect_exit=True, expected_exitcode=0, **kwargs): """ Starts the server. Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. 
""" # Ensure the configuration file is written self.write_conf(**kwargs) self.create_database() cmd = ("%(server_module)s --config-file %(conf_file_name)s" % {"server_module": self.server_module, "conf_file_name": self.conf_file_name}) cmd = "%s -m %s" % (sys.executable, cmd) # close the sock and release the unused port closer to start time if self.exec_env: exec_env = self.exec_env.copy() else: exec_env = {} pass_fds = set() if self.sock: if not self.fork_socket: self.sock.close() self.sock = None else: fd = os.dup(self.sock.fileno()) exec_env[utils.GLANCE_TEST_SOCKET_FD_STR] = str(fd) pass_fds.add(fd) self.sock.close() self.process_pid = test_utils.fork_exec(cmd, logfile=os.devnull, exec_env=exec_env, pass_fds=pass_fds) self.stop_kill = not expect_exit if self.pid_file: pf = open(self.pid_file, 'w') pf.write('%d\n' % self.process_pid) pf.close() if not expect_exit: rc = 0 try: os.kill(self.process_pid, 0) except OSError: raise RuntimeError("The process did not start") else: rc = test_utils.wait_for_fork( self.process_pid, expected_exitcode=expected_exitcode, force=False) # avoid an FD leak if self.sock: os.close(fd) self.sock = None return (rc, '', '') def stop(self): """ Spin down the server. """ if not self.process_pid: raise Exception('why is this being called? %s' % self.server_name) if self.stop_kill: os.kill(self.process_pid, signal.SIGTERM) rc = test_utils.wait_for_fork(self.process_pid, raise_error=False, force=self.stop_kill) return (rc, '', '') class Win32Server(BaseServer): def __init__(self, *args, **kwargs): super(Win32Server, self).__init__(*args, **kwargs) self._processutils = os_win_utilsfactory.get_processutils() def start(self, expect_exit=True, expected_exitcode=0, **kwargs): """ Starts the server. Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. """ # Ensure the configuration file is written self.write_conf(**kwargs) self.create_database() cmd = ("%(server_module)s --config-file %(conf_file_name)s" % {"server_module": self.server_module, "conf_file_name": self.conf_file_name}) cmd = "%s -m %s" % (sys.executable, cmd) # Passing socket objects on Windows is a bit more cumbersome. # We don't really have to do it. if self.sock: self.sock.close() self.sock = None self.process = subprocess.Popen( cmd, env=self.exec_env) self.process_pid = self.process.pid try: self.job_handle = self._processutils.kill_process_on_job_close( self.process_pid) except Exception: # Could not associate child process with a job, killing it. self.process.kill() raise self.stop_kill = not expect_exit if self.pid_file: pf = open(self.pid_file, 'w') pf.write('%d\n' % self.process_pid) pf.close() rc = 0 if expect_exit: self.process.communicate() rc = self.process.returncode return (rc, '', '') def stop(self): """ Spin down the server. """ if not self.process_pid: raise Exception('Server "%s" process not running.' 
% self.server_name) if self.stop_kill: self.process.terminate() return (0, '', '') if os.name == 'nt': Server = Win32Server else: Server = PosixServer class ApiServer(Server): """ Server object that starts/stops/manages the API server """ def __init__(self, test_dir, port, policy_file, delayed_delete=False, pid_file=None, sock=None, **kwargs): super(ApiServer, self).__init__(test_dir, port, sock=sock) self.server_name = 'api' self.server_module = 'glance.cmd.%s' % self.server_name self.default_store = kwargs.get("default_store", "file") self.bind_host = "127.0.0.1" self.metadata_encryption_key = "012345678901234567890123456789ab" self.image_dir = os.path.join(self.test_dir, "images") self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid") self.log_file = os.path.join(self.test_dir, "api.log") self.image_size_cap = 1099511627776 self.delayed_delete = delayed_delete self.workers = 0 self.scrub_time = 5 self.image_cache_dir = os.path.join(self.test_dir, 'cache') self.image_cache_driver = 'sqlite' self.policy_file = policy_file self.policy_default_rule = 'default' self.property_protection_rule_format = 'roles' self.image_member_quota = 10 self.image_property_quota = 10 self.image_tag_quota = 10 self.image_location_quota = 2 self.disable_path = None self.enforce_new_defaults = True self.needs_database = True default_sql_connection = SQLITE_CONN_TEMPLATE % self.test_dir self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', default_sql_connection) self.user_storage_quota = '0' self.lock_path = self.test_dir self.node_staging_uri = 'file://%s' % os.path.join( self.test_dir, 'staging') self.conf_base = """[DEFAULT] debug = %(debug)s default_log_levels = eventlet.wsgi.server=DEBUG,stevedore.extension=INFO bind_host = %(bind_host)s bind_port = %(bind_port)s metadata_encryption_key = %(metadata_encryption_key)s log_file = %(log_file)s image_size_cap = %(image_size_cap)d delayed_delete = %(delayed_delete)s workers = %(workers)s scrub_time = %(scrub_time)s image_cache_dir = %(image_cache_dir)s image_cache_driver = %(image_cache_driver)s show_image_direct_url = %(show_image_direct_url)s show_multiple_locations = %(show_multiple_locations)s do_secure_hash = %(do_secure_hash)s http_retries = %(http_retries)s user_storage_quota = %(user_storage_quota)s lock_path = %(lock_path)s property_protection_file = %(property_protection_file)s property_protection_rule_format = %(property_protection_rule_format)s image_member_quota=%(image_member_quota)s image_property_quota=%(image_property_quota)s image_tag_quota=%(image_tag_quota)s image_location_quota=%(image_location_quota)s node_staging_uri=%(node_staging_uri)s [database] connection = %(sql_connection)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s enforce_new_defaults=%(enforce_new_defaults)s [paste_deploy] flavor = %(deployment_flavor)s [glance_store] filesystem_store_datadir=%(image_dir)s default_store = %(default_store)s [import_filtering_opts] allowed_ports = [] """ self.paste_conf_base = """[composite:glance-api] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck [pipeline:api] pipeline = cors versionnegotiation gzip unauthenticated-context rootapp [composite:glance-api-caching] paste.composite_factory = glance.api:root_app_factory /: api-caching /healthcheck: healthcheck [pipeline:api-caching] pipeline = cors versionnegotiation gzip context cache rootapp [composite:glance-api-cachemanagement] paste.composite_factory = glance.api:root_app_factory /: 
api-cachemanagement /healthcheck: healthcheck [pipeline:api-cachemanagement] pipeline = cors versionnegotiation gzip unauthenticated-context cache cache_manage rootapp [composite:glance-api-fakeauth] paste.composite_factory = glance.api:root_app_factory /: api-fakeauth /healthcheck: healthcheck [pipeline:api-fakeauth] pipeline = cors versionnegotiation gzip fakeauth context rootapp [composite:glance-api-noauth] paste.composite_factory = glance.api:root_app_factory /: api-noauth /healthcheck: healthcheck [pipeline:api-noauth] pipeline = cors versionnegotiation gzip context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = %(disable_path)s [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cache_manage] paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory allowed_origin=http://valid.example.com """ class ApiServerForMultipleBackend(Server): """ Server object that starts/stops/manages the API server """ def __init__(self, test_dir, port, policy_file, delayed_delete=False, pid_file=None, sock=None, **kwargs): super(ApiServerForMultipleBackend, self).__init__( test_dir, port, sock=sock) self.server_name = 'api' self.server_module = 'glance.cmd.%s' % self.server_name self.default_backend = kwargs.get("default_backend", "file1") self.bind_host = "127.0.0.1" self.metadata_encryption_key = "012345678901234567890123456789ab" self.image_dir_backend_1 = os.path.join(self.test_dir, "images_1") self.image_dir_backend_2 = os.path.join(self.test_dir, "images_2") self.image_dir_backend_3 = os.path.join(self.test_dir, "images_3") self.staging_dir = os.path.join(self.test_dir, "staging") self.pid_file = pid_file or os.path.join(self.test_dir, "multiple_backend_api.pid") self.log_file = os.path.join(self.test_dir, "multiple_backend_api.log") self.image_size_cap = 1099511627776 self.delayed_delete = delayed_delete self.workers = 0 self.scrub_time = 5 self.image_cache_dir = os.path.join(self.test_dir, 'cache') self.image_cache_driver = 'sqlite' self.policy_file = policy_file self.policy_default_rule = 'default' self.property_protection_rule_format = 'roles' self.image_member_quota = 10 self.image_property_quota = 10 self.image_tag_quota = 10 self.image_location_quota = 2 self.disable_path = None self.enforce_new_defaults = True self.needs_database = True default_sql_connection = SQLITE_CONN_TEMPLATE % self.test_dir self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', default_sql_connection) self.user_storage_quota = '0' self.lock_path = self.test_dir self.conf_base = """[DEFAULT] debug = %(debug)s default_log_levels 
= eventlet.wsgi.server=DEBUG,stevedore.extension=INFO bind_host = %(bind_host)s bind_port = %(bind_port)s metadata_encryption_key = %(metadata_encryption_key)s log_file = %(log_file)s image_size_cap = %(image_size_cap)d delayed_delete = %(delayed_delete)s workers = %(workers)s scrub_time = %(scrub_time)s image_cache_dir = %(image_cache_dir)s image_cache_driver = %(image_cache_driver)s show_image_direct_url = %(show_image_direct_url)s show_multiple_locations = %(show_multiple_locations)s do_secure_hash = %(do_secure_hash)s http_retries = %(http_retries)s user_storage_quota = %(user_storage_quota)s lock_path = %(lock_path)s property_protection_file = %(property_protection_file)s property_protection_rule_format = %(property_protection_rule_format)s image_member_quota=%(image_member_quota)s image_property_quota=%(image_property_quota)s image_tag_quota=%(image_tag_quota)s image_location_quota=%(image_location_quota)s enabled_backends=file1:file,file2:file,file3:file [database] connection = %(sql_connection)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s enforce_new_defaults=%(enforce_new_defaults)s [paste_deploy] flavor = %(deployment_flavor)s [glance_store] default_backend = %(default_backend)s [file1] filesystem_store_datadir=%(image_dir_backend_1)s [file2] filesystem_store_datadir=%(image_dir_backend_2)s [file3] filesystem_store_datadir=%(image_dir_backend_3)s [import_filtering_opts] allowed_ports = [] [os_glance_staging_store] filesystem_store_datadir=%(staging_dir)s """ self.paste_conf_base = """[composite:glance-api] paste.composite_factory = glance.api:root_app_factory /: api /healthcheck: healthcheck [pipeline:api] pipeline = cors versionnegotiation gzip unauthenticated-context rootapp [composite:glance-api-caching] paste.composite_factory = glance.api:root_app_factory /: api-caching /healthcheck: healthcheck [pipeline:api-caching] pipeline = cors versionnegotiation gzip unauthenticated-context cache rootapp [composite:glance-api-cachemanagement] paste.composite_factory = glance.api:root_app_factory /: api-cachemanagement /healthcheck: healthcheck [pipeline:api-cachemanagement] pipeline = cors versionnegotiation gzip unauthenticated-context cache cache_manage rootapp [composite:glance-api-fakeauth] paste.composite_factory = glance.api:root_app_factory /: api-fakeauth /healthcheck: healthcheck [pipeline:api-fakeauth] pipeline = cors versionnegotiation gzip fakeauth context rootapp [composite:glance-api-noauth] paste.composite_factory = glance.api:root_app_factory /: api-noauth /healthcheck: healthcheck [pipeline:api-noauth] pipeline = cors versionnegotiation gzip context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = %(disable_path)s [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cache_manage] paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = 
glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory allowed_origin=http://valid.example.com """ class ScrubberDaemon(Server): """ Server object that starts/stops/manages the Scrubber server """ def __init__(self, test_dir, policy_file, daemon=False, **kwargs): # NOTE(jkoelker): Set the port to 0 since we actually don't listen super(ScrubberDaemon, self).__init__(test_dir, 0) self.server_name = 'scrubber' self.server_module = 'glance.cmd.%s' % self.server_name self.daemon = daemon self.image_dir = os.path.join(self.test_dir, "images") self.scrub_time = 5 self.pid_file = os.path.join(self.test_dir, "scrubber.pid") self.log_file = os.path.join(self.test_dir, "scrubber.log") self.metadata_encryption_key = "012345678901234567890123456789ab" self.lock_path = self.test_dir default_sql_connection = SQLITE_CONN_TEMPLATE % self.test_dir self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', default_sql_connection) self.policy_file = policy_file self.policy_default_rule = 'default' self.conf_base = """[DEFAULT] debug = %(debug)s log_file = %(log_file)s daemon = %(daemon)s wakeup_time = 2 scrub_time = %(scrub_time)s metadata_encryption_key = %(metadata_encryption_key)s lock_path = %(lock_path)s sql_idle_timeout = 3600 [database] connection = %(sql_connection)s [glance_store] filesystem_store_datadir=%(image_dir)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s """ def start(self, expect_exit=True, expected_exitcode=0, **kwargs): if 'daemon' in kwargs: expect_exit = False return super(ScrubberDaemon, self).start( expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) class FunctionalTest(test_utils.BaseTestCase): """ Base test class for any test that wants to test the actual servers and clients and not just the stubbed out interfaces """ inited = False disabled = False launched_servers = [] def setUp(self): super(FunctionalTest, self).setUp() self.test_dir = self.useFixture(fixtures.TempDir()).path self.api_protocol = 'http' self.api_port, api_sock = test_utils.get_unused_port_and_socket() # NOTE: Scrubber is enabled by default for the functional tests. # Please disable it by explicitly setting 'self.include_scrubber' to # False in the test SetUps that do not require Scrubber to run. self.include_scrubber = True # The clients will try to connect to this address. 
Let's make sure # we're not using the default '0.0.0.0' self.config(bind_host='127.0.0.1') self.config(image_cache_dir=self.test_dir) self.tracecmd = tracecmd_osmap.get(platform.system()) conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) self.copy_data_file('schema-image.json', conf_dir) self.copy_data_file('property-protections.conf', conf_dir) self.copy_data_file('property-protections-policies.conf', conf_dir) self.property_file_roles = os.path.join(conf_dir, 'property-protections.conf') property_policies = 'property-protections-policies.conf' self.property_file_policies = os.path.join(conf_dir, property_policies) self.policy_file = os.path.join(conf_dir, 'policy.yaml') self.api_server = ApiServer(self.test_dir, self.api_port, self.policy_file, sock=api_sock) self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file) self.pid_files = [self.api_server.pid_file, self.scrubber_daemon.pid_file] self.files_to_destroy = [] self.launched_servers = [] # Keep track of servers we've logged so we don't double-log them. self._attached_server_logs = [] self.addOnException(self.add_log_details_on_exception) if not self.disabled: # We destroy the test data store between each test case, # and recreate it, which ensures that we have no side-effects # from the tests self.addCleanup( self._reset_database, self.api_server.sql_connection) self.addCleanup(self.cleanup) self._reset_database(self.api_server.sql_connection) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def set_policy_rules(self, rules): fap = open(self.policy_file, 'w') fap.write(jsonutils.dumps(rules)) fap.close() def _reset_database(self, conn_string): conn_pieces = urlparse.urlparse(conn_string) if conn_string.startswith('sqlite'): # We leave behind the sqlite DB for failing tests to aid # in diagnosis, as the file size is relatively small and # won't interfere with subsequent tests as it's in a per- # test directory (which is blown-away if the test is green) pass elif conn_string.startswith('mysql'): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: if auth_pieces[1].strip(): password = "-p%s" % auth_pieces[1] sql = ("drop database if exists %(database)s; " "create database %(database)s;") % {'database': database} cmd = ("mysql -u%(user)s %(password)s -h%(host)s " "-e\"%(sql)s\"") % {'user': user, 'password': password, 'host': host, 'sql': sql} exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) def cleanup(self): """ Makes sure anything we created or started up in the tests are destroyed or spun down """ # NOTE(jbresnah) call stop on each of the servers instead of # checking the pid file. stop() will wait until the child # server is dead. This eliminates the possibility of a race # between a child process listening on a port actually dying # and a new process being started servers = [self.api_server, self.scrubber_daemon] for s in servers: try: s.stop() except Exception: pass for f in self.files_to_destroy: if os.path.exists(f): os.unlink(f) def start_server(self, server, expect_launch, expect_exit=True, expected_exitcode=0, **kwargs): """ Starts a server on an unused port. 
        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the server.

        :param server: the server to launch
        :param expect_launch: true iff the server is expected to
                              successfully start
        :param expect_exit: true iff the launched process is expected
                            to exit in a timely fashion
        :param expected_exitcode: expected exitcode from the launcher
        """
        self.cleanup()

        # Start up the requested server
        exitcode, out, err = server.start(expect_exit=expect_exit,
                                          expected_exitcode=expected_exitcode,
                                          **kwargs)
        if expect_exit:
            self.assertEqual(expected_exitcode, exitcode,
                             "Failed to spin up the requested server. "
                             "Got: %s" % err)

        self.launched_servers.append(server)

        launch_msg = self.wait_for_servers([server], expect_launch)
        self.assertTrue(launch_msg is None, launch_msg)

    def start_with_retry(self, server, port_name, max_retries,
                         expect_launch=True, **kwargs):
        """
        Starts a server, with retries if the server launches but
        fails to start listening on the expected port.

        :param server: the server to launch
        :param port_name: the name of the port attribute
        :param max_retries: the maximum number of attempts
        :param expect_launch: true iff the server is expected to
                              successfully start
        :param expect_exit: true iff the launched process is expected
                            to exit in a timely fashion
        """
        launch_msg = None
        for i in range(max_retries):
            exitcode, out, err = server.start(expect_exit=not expect_launch,
                                              **kwargs)
            name = server.server_name
            self.assertEqual(0, exitcode,
                             "Failed to spin up the %s server. "
                             "Got: %s" % (name, err))
            launch_msg = self.wait_for_servers([server], expect_launch)
            if launch_msg:
                server.stop()
                server.bind_port = get_unused_port()
                setattr(self, port_name, server.bind_port)
            else:
                self.launched_servers.append(server)
                break
        self.assertTrue(launch_msg is None, launch_msg)

    def start_servers(self, **kwargs):
        """
        Starts the API server (and the Scrubber daemon, when
        self.include_scrubber is set) on unused ports.

        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the servers.
        """
        self.cleanup()

        # Start up the API server
        self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)

        if self.include_scrubber:
            exitcode, out, err = self.scrubber_daemon.start(**kwargs)
            self.assertEqual(0, exitcode,
                             "Failed to spin up the Scrubber daemon. "
                             "Got: %s" % err)

    def ping_server(self, port):
        """
        Simple ping on the port. If responsive, return True, else
        return False.

        :note We use raw sockets, not ping here, since ping uses ICMP and
        has no concept of ports...
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(("127.0.0.1", port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def ping_server_ipv6(self, port):
        """
        Simple ping on the port. If responsive, return True, else
        return False.

        :note We use raw sockets, not ping here, since ping uses ICMP and
        has no concept of ports...

        The function uses IPv6 (therefore AF_INET6 and ::1).
        """
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        try:
            s.connect(("::1", port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def wait_for_servers(self, servers, expect_launch=True, timeout=30):
        """
        Tight loop, waiting for the given server port(s) to be available.
        Returns when all are pingable. There is a timeout on waiting
        for the servers to come up.
        :param servers: the Glance server objects whose ports to ping
        :param expect_launch: Optional, true iff the server(s) are
                              expected to successfully start
        :param timeout: Optional, defaults to 30 seconds
        :returns: None if launch expectation is met, otherwise an
                  assertion message
        """
        now = datetime.datetime.now()
        timeout_time = now + datetime.timedelta(seconds=timeout)
        replied = []
        while (timeout_time > now):
            pinged = 0
            for server in servers:
                if self.ping_server(server.bind_port):
                    pinged += 1

                    if server not in replied:
                        replied.append(server)

            if pinged == len(servers):
                msg = 'Unexpected server launch status'
                return None if expect_launch else msg

            now = datetime.datetime.now()
            time.sleep(0.05)

        failed = list(set(servers) - set(replied))
        msg = 'Unexpected server launch status for: '
        for f in failed:
            msg += ('%s, ' % f.server_name)
            if os.path.exists(f.pid_file):
                pid = f.process_pid
                trace = f.pid_file.replace('.pid', '.trace')
                if self.tracecmd:
                    cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace)
                    try:
                        execute(cmd, raise_error=False, expect_exit=False)
                    except OSError as e:
                        if e.errno == errno.ENOENT:
                            raise RuntimeError('No executable found for "%s" '
                                               'command.' % self.tracecmd)
                        else:
                            raise
                    time.sleep(0.5)
                    if os.path.exists(trace):
                        msg += ('\n%s:\n%s\n' % (self.tracecmd,
                                                 open(trace).read()))

        self.add_log_details(failed)

        return msg if expect_launch else None

    def stop_server(self, server):
        """
        Called to stop a single server in a normal fashion, using its
        stop() method to gracefully shut the server down.

        :param server: the server to stop
        """
        # Spin down the requested server
        server.stop()

    def stop_servers(self):
        """
        Called to stop the started servers in a normal fashion. Note
        that cleanup() will stop the servers using a fairly draconian
        method of sending a SIGTERM signal to the servers. Here, we use
        each server's stop() method to gracefully shut it down. This
        method also asserts that the shutdown was clean, and so it is
        meant to be called during a normal test case sequence.
        """
        # Spin down the API server
        self.stop_server(self.api_server)

        if self.include_scrubber:
            self.stop_server(self.scrubber_daemon)

    def copy_data_file(self, file_name, dst_dir):
        src_file_name = os.path.join('glance/tests/etc', file_name)
        shutil.copy(src_file_name, dst_dir)
        dst_file_name = os.path.join(dst_dir, file_name)
        return dst_file_name

    def add_log_details_on_exception(self, *args, **kwargs):
        self.add_log_details()

    def add_log_details(self, servers=None):
        for s in servers or self.launched_servers:
            if s.log_file not in self._attached_server_logs:
                self._attached_server_logs.append(s.log_file)
                self.addDetail(
                    s.server_name,
                    testtools.content.text_content(s.dump_log()))


class MultipleBackendFunctionalTest(test_utils.BaseTestCase):

    """
    Base test class for any test that wants to test the actual
    servers and clients and not just the stubbed out interfaces
    """

    inited = False
    disabled = False
    launched_servers = []

    def setUp(self):
        super(MultipleBackendFunctionalTest, self).setUp()
        self.test_dir = self.useFixture(fixtures.TempDir()).path

        self.api_protocol = 'http'
        self.api_port, api_sock = test_utils.get_unused_port_and_socket()

        # NOTE: Scrubber is enabled by default for the functional tests.
        # Please disable it by explicitly setting 'self.include_scrubber' to
        # False in the test SetUps that do not require Scrubber to run.
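        # A minimal sketch of that pattern ('MyQuietTest' is a hypothetical
        # subclass used only for illustration, not part of this module):
        #
        #     class MyQuietTest(MultipleBackendFunctionalTest):
        #         def setUp(self):
        #             super().setUp()
        #             self.include_scrubber = False
        #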
self.include_scrubber = True self.tracecmd = tracecmd_osmap.get(platform.system()) conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) self.copy_data_file('schema-image.json', conf_dir) self.copy_data_file('property-protections.conf', conf_dir) self.copy_data_file('property-protections-policies.conf', conf_dir) self.property_file_roles = os.path.join(conf_dir, 'property-protections.conf') property_policies = 'property-protections-policies.conf' self.property_file_policies = os.path.join(conf_dir, property_policies) self.policy_file = os.path.join(conf_dir, 'policy.yaml') self.api_server_multiple_backend = ApiServerForMultipleBackend( self.test_dir, self.api_port, self.policy_file, sock=api_sock) self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file) self.pid_files = [self.api_server_multiple_backend.pid_file, self.scrubber_daemon.pid_file] self.files_to_destroy = [] self.launched_servers = [] # Keep track of servers we've logged so we don't double-log them. self._attached_server_logs = [] self.addOnException(self.add_log_details_on_exception) if not self.disabled: # We destroy the test data store between each test case, # and recreate it, which ensures that we have no side-effects # from the tests self.addCleanup( self._reset_database, self.api_server_multiple_backend.sql_connection) self.addCleanup(self.cleanup) self._reset_database( self.api_server_multiple_backend.sql_connection) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def set_policy_rules(self, rules): fap = open(self.policy_file, 'w') fap.write(jsonutils.dumps(rules)) fap.close() def _reset_database(self, conn_string): conn_pieces = urlparse.urlparse(conn_string) if conn_string.startswith('sqlite'): # We leave behind the sqlite DB for failing tests to aid # in diagnosis, as the file size is relatively small and # won't interfere with subsequent tests as it's in a per- # test directory (which is blown-away if the test is green) pass elif conn_string.startswith('mysql'): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: if auth_pieces[1].strip(): password = "-p%s" % auth_pieces[1] sql = ("drop database if exists %(database)s; " "create database %(database)s;") % {'database': database} cmd = ("mysql -u%(user)s %(password)s -h%(host)s " "-e\"%(sql)s\"") % {'user': user, 'password': password, 'host': host, 'sql': sql} exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) def cleanup(self): """ Makes sure anything we created or started up in the tests are destroyed or spun down """ # NOTE(jbresnah) call stop on each of the servers instead of # checking the pid file. stop() will wait until the child # server is dead. This eliminates the possibility of a race # between a child process listening on a port actually dying # and a new process being started servers = [self.api_server_multiple_backend, self.scrubber_daemon] for s in servers: try: s.stop() except Exception: pass for f in self.files_to_destroy: if os.path.exists(f): os.unlink(f) def start_server(self, server, expect_launch, expect_exit=True, expected_exitcode=0, **kwargs): """ Starts a server on an unused port. 
        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the server.

        :param server: the server to launch
        :param expect_launch: true iff the server is expected to
                              successfully start
        :param expect_exit: true iff the launched process is expected
                            to exit in a timely fashion
        :param expected_exitcode: expected exitcode from the launcher
        """
        self.cleanup()

        # Start up the requested server
        exitcode, out, err = server.start(expect_exit=expect_exit,
                                          expected_exitcode=expected_exitcode,
                                          **kwargs)
        if expect_exit:
            self.assertEqual(expected_exitcode, exitcode,
                             "Failed to spin up the requested server. "
                             "Got: %s" % err)

        self.launched_servers.append(server)

        launch_msg = self.wait_for_servers([server], expect_launch)
        self.assertTrue(launch_msg is None, launch_msg)

    def start_with_retry(self, server, port_name, max_retries,
                         expect_launch=True, **kwargs):
        """
        Starts a server, with retries if the server launches but
        fails to start listening on the expected port.

        :param server: the server to launch
        :param port_name: the name of the port attribute
        :param max_retries: the maximum number of attempts
        :param expect_launch: true iff the server is expected to
                              successfully start
        :param expect_exit: true iff the launched process is expected
                            to exit in a timely fashion
        """
        launch_msg = None
        for i in range(max_retries):
            exitcode, out, err = server.start(expect_exit=not expect_launch,
                                              **kwargs)
            name = server.server_name
            self.assertEqual(0, exitcode,
                             "Failed to spin up the %s server. "
                             "Got: %s" % (name, err))
            launch_msg = self.wait_for_servers([server], expect_launch)
            if launch_msg:
                server.stop()
                server.bind_port = get_unused_port()
                setattr(self, port_name, server.bind_port)
            else:
                self.launched_servers.append(server)
                break
        self.assertTrue(launch_msg is None, launch_msg)

    def start_servers(self, **kwargs):
        """
        Starts the API server (and the Scrubber daemon, when
        self.include_scrubber is set) on unused ports.

        Any kwargs passed to this method will override the configuration
        value in the conf file used in starting the servers.
        """
        self.cleanup()

        # Start up the API server
        self.start_with_retry(self.api_server_multiple_backend,
                              'api_port', 3, **kwargs)

        if self.include_scrubber:
            exitcode, out, err = self.scrubber_daemon.start(**kwargs)
            self.assertEqual(0, exitcode,
                             "Failed to spin up the Scrubber daemon. "
                             "Got: %s" % err)

    def ping_server(self, port):
        """
        Simple ping on the port. If responsive, return True, else
        return False.

        :note We use raw sockets, not ping here, since ping uses ICMP and
        has no concept of ports...
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(("127.0.0.1", port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def ping_server_ipv6(self, port):
        """
        Simple ping on the port. If responsive, return True, else
        return False.

        :note We use raw sockets, not ping here, since ping uses ICMP and
        has no concept of ports...

        The function uses IPv6 (therefore AF_INET6 and ::1).
        """
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        try:
            s.connect(("::1", port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def wait_for_servers(self, servers, expect_launch=True, timeout=30):
        """
        Tight loop, waiting for the given server port(s) to be available.
        Returns when all are pingable. There is a timeout on waiting
        for the servers to come up.
        :param servers: the Glance server objects whose ports to ping
        :param expect_launch: Optional, true iff the server(s) are
                              expected to successfully start
        :param timeout: Optional, defaults to 30 seconds
        :returns: None if launch expectation is met, otherwise an
                  assertion message
        """
        now = datetime.datetime.now()
        timeout_time = now + datetime.timedelta(seconds=timeout)
        replied = []
        while (timeout_time > now):
            pinged = 0
            for server in servers:
                if self.ping_server(server.bind_port):
                    pinged += 1

                    if server not in replied:
                        replied.append(server)

            if pinged == len(servers):
                msg = 'Unexpected server launch status'
                return None if expect_launch else msg

            now = datetime.datetime.now()
            time.sleep(0.05)

        failed = list(set(servers) - set(replied))
        msg = 'Unexpected server launch status for: '
        for f in failed:
            msg += ('%s, ' % f.server_name)
            if os.path.exists(f.pid_file):
                pid = f.process_pid
                trace = f.pid_file.replace('.pid', '.trace')
                if self.tracecmd:
                    cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace)
                    try:
                        execute(cmd, raise_error=False, expect_exit=False)
                    except OSError as e:
                        if e.errno == errno.ENOENT:
                            raise RuntimeError('No executable found for "%s" '
                                               'command.' % self.tracecmd)
                        else:
                            raise
                    time.sleep(0.5)
                    if os.path.exists(trace):
                        msg += ('\n%s:\n%s\n' % (self.tracecmd,
                                                 open(trace).read()))

        self.add_log_details(failed)

        return msg if expect_launch else None

    def stop_server(self, server):
        """
        Called to stop a single server in a normal fashion, using its
        stop() method to gracefully shut the server down.

        :param server: the server to stop
        """
        # Spin down the requested server
        server.stop()

    def stop_servers(self):
        """
        Called to stop the started servers in a normal fashion. Note
        that cleanup() will stop the servers using a fairly draconian
        method of sending a SIGTERM signal to the servers. Here, we use
        each server's stop() method to gracefully shut it down. This
        method also asserts that the shutdown was clean, and so it is
        meant to be called during a normal test case sequence.
        """
        # Spin down the API
        self.stop_server(self.api_server_multiple_backend)

        if self.include_scrubber:
            self.stop_server(self.scrubber_daemon)

    def copy_data_file(self, file_name, dst_dir):
        src_file_name = os.path.join('glance/tests/etc', file_name)
        shutil.copy(src_file_name, dst_dir)
        dst_file_name = os.path.join(dst_dir, file_name)
        return dst_file_name

    def add_log_details_on_exception(self, *args, **kwargs):
        self.add_log_details()

    def add_log_details(self, servers=None):
        for s in servers or self.launched_servers:
            if s.log_file not in self._attached_server_logs:
                self._attached_server_logs.append(s.log_file)
                self.addDetail(
                    s.server_name,
                    testtools.content.text_content(s.dump_log()))


class SynchronousAPIBase(test_utils.BaseTestCase):
    """A base class that provides synchronous calling into the API.

    This provides a way to directly call into the API WSGI stack
    without starting a separate server, and with a simple paste
    pipeline. Configured with multi-store and a real database.

    This differs from the FunctionalTest lineage above in that those
    classes start a full copy of the API server as a separate process,
    whereas this calls directly into the WSGI stack. This test base is
    appropriate for situations where you need to be able to mock the
    state of the world (i.e. warp time, or inject errors) but should
    not be used for happy-path testing where FunctionalTest provides
    more isolation.

    To use this, inherit and run start_server() before you are ready
    to make API calls (either in your setUp() or per-test if you need
    to change config or mocking).
Once started, use the api_get(), api_put(), api_post(), and api_delete() methods to make calls to the API. """ TENANT = str(uuid.uuid4()) @mock.patch('oslo_db.sqlalchemy.enginefacade.writer.get_engine') def setup_database(self, mock_get_engine): """Configure and prepare a fresh sqlite database.""" db_file = 'sqlite:///%s/test.db' % self.test_dir self.config(connection=db_file, group='database') # NOTE(danms): Make sure that we clear the current global # database configuration, provision a temporary database file, # and run migrations with our configuration to define the # schema there. db_api.clear_db_env() engine = db_api.get_engine() mock_get_engine.return_value = engine with mock.patch('logging.config'): # NOTE(danms): The alembic config in the env module will break our # BaseTestCase logging setup. So mock that out to prevent it while # we db_sync. test_utils.db_sync(engine=engine) def setup_simple_paste(self): """Setup a very simple no-auth paste pipeline. This configures the API to be very direct, including only the middleware absolutely required for consistent API calls. """ self.paste_config = os.path.join(self.test_dir, 'glance-api-paste.ini') with open(self.paste_config, 'w') as f: f.write(textwrap.dedent(""" [filter:context] paste.filter_factory = glance.api.middleware.context:\ ContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:\ FakeAuthMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:\ CacheFilter.factory [filter:cachemanage] paste.filter_factory = glance.api.middleware.cache_manage:\ CacheManageFilter.factory [pipeline:glance-api-cachemanagement] pipeline = context cache cachemanage rootapp [pipeline:glance-api-caching] pipeline = context cache rootapp [pipeline:glance-api] pipeline = context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /v2: apiv2app [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory """)) def _store_dir(self, store): return os.path.join(self.test_dir, store) def setup_stores(self): """Configures multiple backend stores. This configures the API with three file-backed stores (store1, store2, and store3) as well as a os_glance_staging_store for imports. """ self.config(enabled_backends={'store1': 'file', 'store2': 'file', 'store3': 'file'}) glance_store.register_store_opts(CONF, reserved_stores=wsgi.RESERVED_STORES) self.config(default_backend='store1', group='glance_store') self.config(filesystem_store_datadir=self._store_dir('store1'), group='store1') self.config(filesystem_store_datadir=self._store_dir('store2'), group='store2') self.config(filesystem_store_datadir=self._store_dir('store3'), group='store3') self.config(filesystem_store_datadir=self._store_dir('staging'), group='os_glance_staging_store') self.config(filesystem_store_datadir=self._store_dir('tasks'), group='os_glance_tasks_store') glance_store.create_multi_stores(CONF, reserved_stores=wsgi.RESERVED_STORES) glance_store.verify_store() def setUp(self): super(SynchronousAPIBase, self).setUp() self.setup_database() self.setup_simple_paste() self.setup_stores() def start_server(self, enable_cache=True, set_worker_url=True): """Builds and "starts" the API server. Note that this doesn't actually "start" anything like FunctionalTest does above, but that terminology is used here to make it seem like the same sort of pattern. 
""" config.set_config_defaults() root_app = 'glance-api' if enable_cache: root_app = 'glance-api-cachemanagement' self.config(image_cache_dir=self._store_dir('cache')) if set_worker_url: self.config(worker_self_reference_url='http://workerx') self.api = config.load_paste_app(root_app, conf_file=self.paste_config) self.config(enforce_new_defaults=True, group='oslo_policy') self.config(enforce_scope=True, group='oslo_policy') def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': self.TENANT, 'Content-Type': 'application/json', 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def api_request(self, method, url, headers=None, data=None, json=None, body_file=None): """Perform a request against the API. NOTE: Most code should use api_get(), api_post(), api_put(), or api_delete() instead! :param method: The HTTP method to use (i.e. GET, POST, etc) :param url: The *path* part of the URL to call (i.e. /v2/images) :param headers: Optional updates to the default set of headers :param data: Optional bytes data payload to send (overrides @json) :param json: Optional dict structure to be jsonified and sent as the payload (mutually exclusive with @data) :param body_file: Optional io.IOBase to provide as the input data stream for the request (overrides @data) :returns: A webob.Response object """ headers = self._headers(headers) req = webob.Request.blank(url, method=method, headers=headers) if json and not data: data = jsonutils.dumps(json).encode() if data and not body_file: req.body = data elif body_file: req.body_file = body_file return self.api(req) def api_get(self, url, headers=None): """Perform a GET request against the API. :param url: The *path* part of the URL to call (i.e. /v2/images) :param headers: Optional updates to the default set of headers :returns: A webob.Response object """ return self.api_request('GET', url, headers=headers) def api_post(self, url, headers=None, data=None, json=None, body_file=None): """Perform a POST request against the API. :param url: The *path* part of the URL to call (i.e. /v2/images) :param headers: Optional updates to the default set of headers :param data: Optional bytes data payload to send (overrides @json) :param json: Optional dict structure to be jsonified and sent as the payload (mutually exclusive with @data) :param body_file: Optional io.IOBase to provide as the input data stream for the request (overrides @data) :returns: A webob.Response object """ return self.api_request('POST', url, headers=headers, data=data, json=json, body_file=body_file) def api_put(self, url, headers=None, data=None, json=None, body_file=None): """Perform a PUT request against the API. :param url: The *path* part of the URL to call (i.e. /v2/images) :param headers: Optional updates to the default set of headers :param data: Optional bytes data payload to send (overrides @json, mutually exclusive with body_file) :param json: Optional dict structure to be jsonified and sent as the payload (mutually exclusive with @data) :param body_file: Optional io.IOBase to provide as the input data stream for the request (overrides @data) :returns: A webob.Response object """ return self.api_request('PUT', url, headers=headers, data=data, json=json, body_file=body_file) def api_delete(self, url, headers=None): """Perform a DELETE request against the API. 
:param url: The *path* part of the URL to call (i.e. /v2/images) :param headers: Optional updates to the default set of headers :returns: A webob.Response object """ return self.api_request('DELETE', url, headers=headers) def api_patch(self, url, *patches, headers=None): """Perform a PATCH request against the API. :param url: The *path* part of the URL to call (i.e. /v2/images) :param patches: One or more patch dicts :param headers: Optional updates to the default set of headers :returns: A webob.Response object """ if not headers: headers = {} headers['Content-Type'] = \ 'application/openstack-images-v2.1-json-patch' return self.api_request('PATCH', url, headers=headers, json=list(patches)) def _import_copy(self, image_id, stores, headers=None): """Do an import of image_id to the given stores.""" body = {'method': {'name': 'copy-image'}, 'stores': stores, 'all_stores': False} return self.api_post( '/v2/images/%s/import' % image_id, headers=headers, json=body) def _import_direct(self, image_id, stores, headers=None): """Do an import of image_id to the given stores.""" body = {'method': {'name': 'glance-direct'}, 'stores': stores, 'all_stores': False} return self.api_post( '/v2/images/%s/import' % image_id, headers=headers, json=body) def _import_web_download(self, image_id, stores, url, headers=None): """Do an import of image_id to the given stores.""" body = {'method': {'name': 'web-download', 'uri': url}, 'stores': stores, 'all_stores': False} return self.api_post( '/v2/images/%s/import' % image_id, headers=headers, json=body) def _create_and_upload(self, data_iter=None, expected_code=204, visibility=None): data = { 'name': 'foo', 'container_format': 'bare', 'disk_format': 'raw' } if visibility: data['visibility'] = visibility resp = self.api_post('/v2/images', json=data) self.assertEqual(201, resp.status_code, resp.text) image = jsonutils.loads(resp.text) if data_iter: resp = self.api_put( '/v2/images/%s/file' % image['id'], headers={'Content-Type': 'application/octet-stream'}, body_file=data_iter) else: resp = self.api_put( '/v2/images/%s/file' % image['id'], headers={'Content-Type': 'application/octet-stream'}, data=b'IMAGEDATA') self.assertEqual(expected_code, resp.status_code) return image['id'] def _create_and_stage(self, data_iter=None, expected_code=204, visibility=None, extra={}): data = { 'name': 'foo', 'container_format': 'bare', 'disk_format': 'raw', } if visibility: data['visibility'] = visibility data.update(extra) resp = self.api_post('/v2/images', json=data) image = jsonutils.loads(resp.text) if data_iter: resp = self.api_put( '/v2/images/%s/stage' % image['id'], headers={'Content-Type': 'application/octet-stream'}, body_file=data_iter) else: resp = self.api_put( '/v2/images/%s/stage' % image['id'], headers={'Content-Type': 'application/octet-stream'}, data=b'IMAGEDATA') self.assertEqual(expected_code, resp.status_code) return image['id'] def _wait_for_import(self, image_id, retries=10): for i in range(0, retries): image = self.api_get('/v2/images/%s' % image_id).json if not image.get('os_glance_import_task'): break self.addDetail('Create-Import task id', ttc.text_content(image['os_glance_import_task'])) time.sleep(1) self.assertIsNone(image.get('os_glance_import_task'), 'Timed out waiting for task to complete') return image def _create_and_import(self, stores=[], data_iter=None, expected_code=202, visibility=None, extra={}): """Create an image, stage data, and import into the given stores. 
        :returns: image_id
        """
        image_id = self._create_and_stage(data_iter=data_iter,
                                          visibility=visibility,
                                          extra=extra)

        resp = self._import_direct(image_id, stores)
        self.assertEqual(expected_code, resp.status_code)

        if expected_code >= 400:
            return image_id

        # Make sure it becomes active
        image = self._wait_for_import(image_id)
        self.assertEqual('active', image['status'])

        return image_id

    def _get_latest_task(self, image_id):
        tasks = self.api_get('/v2/images/%s/tasks' % image_id).json['tasks']
        tasks = sorted(tasks, key=lambda t: t['updated_at'])
        self.assertGreater(len(tasks), 0)
        return tasks[-1]

    def _create(self):
        return self.api_post('/v2/images',
                             json={'name': 'foo',
                                   'container_format': 'bare',
                                   'disk_format': 'raw'})

    def _create_metadef_resource(self, path=None, data=None,
                                 expected_code=201):
        resp = self.api_post(path, json=data)
        md_resource = jsonutils.loads(resp.text)
        self.assertEqual(expected_code, resp.status_code)
        return md_resource

glance-29.0.0/glance/tests/functional/db/__init__.py

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(markwash): These functions are used in the base test cases to
# set up the db api implementation under test. Rather than accessing them
# directly, test modules should use the load and reset functions below.
get_db = None
reset_db = None


def load(get_db_fn, reset_db_fn):
    global get_db, reset_db
    get_db = get_db_fn
    reset_db = reset_db_fn


def reset():
    global get_db, reset_db
    get_db = None
    reset_db = None

glance-29.0.0/glance/tests/functional/db/base.py

# Copyright 2010-2012 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
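# NOTE: the driver wiring sketched below is illustrative, not part of this
# module; per the NOTE in glance/tests/functional/db/__init__.py, a concrete
# test module installs its driver with load() before these base tests run
# and clears it afterwards with reset(), roughly:
#
#     import glance.tests.functional.db as db_tests
#
#     db_tests.load(get_db_fn, reset_db_fn)  # driver-specific helpers
#     ...                                    # run the DriverTests subclass
#     db_tests.reset()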
import copy import datetime import functools from unittest import mock import uuid from oslo_db import exception as db_exception from oslo_db.sqlalchemy import utils as sqlalchemyutils from sqlalchemy import sql from glance.common import exception from glance.common import timeutils from glance import context from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import models from glance.tests import functional import glance.tests.functional.db as db_tests from glance.tests import utils as test_utils # The default sort order of results is whatever sort key is specified, # plus created_at and id for ties. When we're not specifying a sort_key, # we get the default (created_at). Some tests below expect the fixtures to be # returned in array-order, so if the created_at timestamps are the same, # these tests rely on the UUID* values being in order UUID1, UUID2, UUID3 = sorted([str(uuid.uuid4()) for x in range(3)]) def build_image_fixture(**kwargs): default_datetime = timeutils.utcnow() image = { 'id': str(uuid.uuid4()), 'name': 'fake image #2', 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'is_public': True, 'created_at': default_datetime, 'updated_at': default_datetime, 'deleted_at': None, 'deleted': False, 'checksum': None, 'min_disk': 5, 'min_ram': 256, 'size': 19, 'locations': [{'url': "file:///tmp/glance-tests/2", 'metadata': {}, 'status': 'active'}], 'properties': {}, } if 'visibility' in kwargs: image.pop('is_public') image.update(kwargs) return image def build_task_fixture(**kwargs): default_datetime = timeutils.utcnow() task = { 'id': str(uuid.uuid4()), 'type': 'import', 'status': 'pending', 'input': {'ping': 'pong'}, 'owner': str(uuid.uuid4()), 'message': None, 'expires_at': None, 'created_at': default_datetime, 'updated_at': default_datetime, } task.update(kwargs) return task class FunctionalInitWrapper(functional.FunctionalTest): def setUp(self): super(FunctionalInitWrapper, self).setUp() self.config(policy_file=self.policy_file, group='oslo_policy') class TestDriver(test_utils.BaseTestCase): def setUp(self): super(TestDriver, self).setUp() context_cls = context.RequestContext self.adm_context = context_cls(is_admin=True, auth_token='user:user:admin') self.context = context_cls(is_admin=False, auth_token='user:user:user') self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self.fixtures = self.build_image_fixtures() self.create_images(self.fixtures) def build_image_fixtures(self): dt1 = timeutils.utcnow() dt2 = dt1 + datetime.timedelta(microseconds=5) fixtures = [ { 'id': UUID1, 'created_at': dt1, 'updated_at': dt1, 'properties': {'foo': 'bar', 'far': 'boo'}, 'protected': True, 'size': 13, }, { 'id': UUID2, 'created_at': dt1, 'updated_at': dt2, 'size': 17, }, { 'id': UUID3, 'created_at': dt2, 'updated_at': dt2, }, ] return [build_image_fixture(**fixture) for fixture in fixtures] def create_images(self, images): for fixture in images: self.db_api.image_create(self.adm_context, fixture) self.delay_inaccurate_clock() class DriverTests(object): def test_image_create_requires_status(self): fixture = {'name': 'mark', 'size': 12} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) fixture = {'name': 'mark', 'size': 12, 'status': 'queued'} self.db_api.image_create(self.context, fixture) @mock.patch.object(timeutils, 'utcnow') def test_image_create_defaults(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime.utcnow() create_time = timeutils.utcnow() values = {'status': 'queued', 
'created_at': create_time, 'updated_at': create_time} image = self.db_api.image_create(self.context, values) self.assertIsNone(image['name']) self.assertIsNone(image['container_format']) self.assertEqual(0, image['min_ram']) self.assertEqual(0, image['min_disk']) self.assertIsNone(image['owner']) self.assertEqual('shared', image['visibility']) self.assertIsNone(image['size']) self.assertIsNone(image['checksum']) self.assertIsNone(image['disk_format']) self.assertEqual([], image['locations']) self.assertFalse(image['protected']) self.assertFalse(image['deleted']) self.assertIsNone(image['deleted_at']) self.assertEqual([], image['properties']) self.assertEqual(create_time, image['created_at']) self.assertEqual(create_time, image['updated_at']) # Image IDs aren't predictable, but they should be populated self.assertTrue(uuid.UUID(image['id'])) # NOTE(bcwaldon): the tags attribute should not be returned as a part # of a core image entity self.assertNotIn('tags', image) def test_image_create_duplicate_id(self): self.assertRaises(exception.Duplicate, self.db_api.image_create, self.context, {'id': UUID1, 'status': 'queued'}) def test_image_create_with_locations(self): locations = [{'url': 'a', 'metadata': {}, 'status': 'active'}, {'url': 'b', 'metadata': {}, 'status': 'active'}] fixture = {'status': 'queued', 'locations': locations} image = self.db_api.image_create(self.context, fixture) actual = [{'url': location['url'], 'metadata': location['metadata'], 'status': location['status']} for location in image['locations']] self.assertEqual(locations, actual) def test_image_create_without_locations(self): locations = [] fixture = {'status': 'queued', 'locations': locations} self.db_api.image_create(self.context, fixture) def test_image_create_with_location_data(self): location_data = [{'url': 'a', 'metadata': {'key': 'value'}, 'status': 'active'}, {'url': 'b', 'metadata': {}, 'status': 'active'}] fixture = {'status': 'queued', 'locations': location_data} image = self.db_api.image_create(self.context, fixture) actual = [{'url': location['url'], 'metadata': location['metadata'], 'status': location['status']} for location in image['locations']] self.assertEqual(location_data, actual) def test_image_create_properties(self): fixture = {'status': 'queued', 'properties': {'ping': 'pong'}} image = self.db_api.image_create(self.context, fixture) expected = [{'name': 'ping', 'value': 'pong'}] actual = [{'name': p['name'], 'value': p['value']} for p in image['properties']] self.assertEqual(expected, actual) def test_image_create_unknown_attributes(self): fixture = {'ping': 'pong'} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) def test_image_create_bad_name(self): bad_name = 'A name with forbidden symbol \U0001f62a' fixture = {'name': bad_name, 'size': 12, 'status': 'queued'} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) def test_image_create_bad_checksum(self): # checksum should be no longer than 32 characters bad_checksum = "42" * 42 fixture = {'checksum': bad_checksum} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) # if checksum is not longer than 32 characters but non-ascii -> # still raise 400 fixture = {'checksum': '\u042f' * 32} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) def test_image_create_bad_int_params(self): int_too_long = 2 ** 31 + 42 for param in ['min_disk', 'min_ram']: fixture = {param: int_too_long} self.assertRaises(exception.Invalid, 
self.db_api.image_create, self.context, fixture) def test_image_create_bad_property(self): # bad value fixture = {'status': 'queued', 'properties': {'bad': 'Bad \U0001f62a'}} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) # bad property names are also not allowed fixture = {'status': 'queued', 'properties': {'Bad \U0001f62a': 'ok'}} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) def test_image_create_bad_location(self): location_data = [{'url': 'a', 'metadata': {'key': 'value'}, 'status': 'active'}, {'url': 'Bad \U0001f60a', 'metadata': {}, 'status': 'active'}] fixture = {'status': 'queued', 'locations': location_data} self.assertRaises(exception.Invalid, self.db_api.image_create, self.context, fixture) def test_image_update_core_attribute(self): fixture = {'status': 'queued'} image = self.db_api.image_update(self.adm_context, UUID3, fixture) self.assertEqual('queued', image['status']) self.assertNotEqual(image['created_at'], image['updated_at']) def test_image_update_with_locations(self): locations = [{'url': 'a', 'metadata': {}, 'status': 'active'}, {'url': 'b', 'metadata': {}, 'status': 'active'}] fixture = {'locations': locations} image = self.db_api.image_update(self.adm_context, UUID3, fixture) self.assertEqual(2, len(image['locations'])) self.assertIn('id', image['locations'][0]) self.assertIn('id', image['locations'][1]) image['locations'][0].pop('id') image['locations'][1].pop('id') self.assertEqual(locations, image['locations']) def test_image_update_with_location_data(self): location_data = [{'url': 'a', 'metadata': {'key': 'value'}, 'status': 'active'}, {'url': 'b', 'metadata': {}, 'status': 'active'}] fixture = {'locations': location_data} image = self.db_api.image_update(self.adm_context, UUID3, fixture) self.assertEqual(2, len(image['locations'])) self.assertIn('id', image['locations'][0]) self.assertIn('id', image['locations'][1]) image['locations'][0].pop('id') image['locations'][1].pop('id') self.assertEqual(location_data, image['locations']) def test_image_update(self): fixture = {'status': 'queued', 'properties': {'ping': 'pong'}} image = self.db_api.image_update(self.adm_context, UUID3, fixture) expected = [{'name': 'ping', 'value': 'pong'}] actual = [{'name': p['name'], 'value': p['value']} for p in image['properties']] self.assertEqual(expected, actual) self.assertEqual('queued', image['status']) self.assertNotEqual(image['created_at'], image['updated_at']) def test_image_update_properties(self): fixture = {'properties': {'ping': 'pong'}} self.delay_inaccurate_clock() image = self.db_api.image_update(self.adm_context, UUID1, fixture) expected = {'ping': 'pong', 'foo': 'bar', 'far': 'boo'} actual = {p['name']: p['value'] for p in image['properties']} self.assertEqual(expected, actual) self.assertNotEqual(image['created_at'], image['updated_at']) def test_image_update_purge_properties(self): fixture = {'properties': {'ping': 'pong'}} image = self.db_api.image_update(self.adm_context, UUID1, fixture, purge_props=True) properties = {p['name']: p for p in image['properties']} # New properties are set self.assertIn('ping', properties) self.assertEqual('pong', properties['ping']['value']) self.assertFalse(properties['ping']['deleted']) # Original properties still show up, but with deleted=True # TODO(markwash): db api should not return deleted properties self.assertIn('foo', properties) self.assertEqual('bar', properties['foo']['value']) self.assertTrue(properties['foo']['deleted']) def 
test_image_update_bad_name(self): fixture = {'name': 'A new name with forbidden symbol \U0001f62a'} self.assertRaises(exception.Invalid, self.db_api.image_update, self.adm_context, UUID1, fixture) def test_image_update_bad_property(self): # bad value fixture = {'status': 'queued', 'properties': {'bad': 'Bad \U0001f62a'}} self.assertRaises(exception.Invalid, self.db_api.image_update, self.adm_context, UUID1, fixture) # bad property names are also not allowed fixture = {'status': 'queued', 'properties': {'Bad \U0001f62a': 'ok'}} self.assertRaises(exception.Invalid, self.db_api.image_update, self.adm_context, UUID1, fixture) def test_image_update_bad_location(self): location_data = [{'url': 'a', 'metadata': {'key': 'value'}, 'status': 'active'}, {'url': 'Bad \U0001f60a', 'metadata': {}, 'status': 'active'}] fixture = {'status': 'queued', 'locations': location_data} self.assertRaises(exception.Invalid, self.db_api.image_update, self.adm_context, UUID1, fixture) def test_update_locations_direct(self): """ For some reasons update_locations can be called directly (not via image_update), so better check that everything is ok if passed 4 byte unicode characters """ # update locations correctly first to retrieve existing location id location_data = [{'url': 'a', 'metadata': {'key': 'value'}, 'status': 'active'}] fixture = {'locations': location_data} image = self.db_api.image_update(self.adm_context, UUID1, fixture) self.assertEqual(1, len(image['locations'])) self.assertIn('id', image['locations'][0]) loc_id = image['locations'][0].pop('id') bad_location = {'url': 'Bad \U0001f60a', 'metadata': {}, 'status': 'active', 'id': loc_id} self.assertRaises(exception.Invalid, self.db_api.image_location_update, self.adm_context, UUID1, bad_location) def test_image_property_delete(self): fixture = {'name': 'ping', 'value': 'pong', 'image_id': UUID1} prop = self.db_api.image_property_create(self.context, fixture) prop = self.db_api.image_property_delete(self.context, prop['name'], UUID1) self.assertIsNotNone(prop['deleted_at']) self.assertTrue(prop['deleted']) def test_image_get(self): image = self.db_api.image_get(self.context, UUID1) self.assertEqual(self.fixtures[0]['id'], image['id']) def test_image_get_disallow_deleted(self): self.db_api.image_destroy(self.adm_context, UUID1) self.assertRaises(exception.NotFound, self.db_api.image_get, self.context, UUID1) def test_image_get_allow_deleted(self): self.db_api.image_destroy(self.adm_context, UUID1) image = self.db_api.image_get(self.adm_context, UUID1) self.assertEqual(self.fixtures[0]['id'], image['id']) self.assertTrue(image['deleted']) def test_image_get_force_allow_deleted(self): self.db_api.image_destroy(self.adm_context, UUID1) image = self.db_api.image_get(self.context, UUID1, force_show_deleted=True) self.assertEqual(self.fixtures[0]['id'], image['id']) def test_image_get_not_owned(self): TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1) ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2, auth_token='user:%s:user' % TENANT2) image = self.db_api.image_create( ctxt1, {'status': 'queued', 'owner': TENANT1}) self.assertRaises(exception.Forbidden, self.db_api.image_get, ctxt2, image['id']) def test_image_get_not_found(self): UUID = str(uuid.uuid4()) self.assertRaises(exception.NotFound, self.db_api.image_get, self.context, UUID) def test_image_get_all(self): images = self.db_api.image_get_all(self.context) self.assertEqual(3, len(images)) 
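    # The filter tests below all go through image_get_all(context,
    # filters=...); the filters dict maps core image attributes or
    # user-defined property names to values, e.g. (an illustrative call
    # shape using the fixture properties defined above):
    #
    #     self.db_api.image_get_all(self.context,
    #                               filters={'foo': 'bar', 'far': 'boo'})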
def test_image_get_all_with_filter(self): images = self.db_api.image_get_all(self.context, filters={ 'id': self.fixtures[0]['id'], }) self.assertEqual(1, len(images)) self.assertEqual(self.fixtures[0]['id'], images[0]['id']) def test_image_get_all_with_filter_user_defined_property(self): images = self.db_api.image_get_all(self.context, filters={'foo': 'bar'}) self.assertEqual(1, len(images)) self.assertEqual(self.fixtures[0]['id'], images[0]['id']) def test_image_get_all_with_filter_nonexistent_userdef_property(self): images = self.db_api.image_get_all(self.context, filters={'faz': 'boo'}) self.assertEqual(0, len(images)) def test_image_get_all_with_filter_userdef_prop_nonexistent_value(self): images = self.db_api.image_get_all(self.context, filters={'foo': 'baz'}) self.assertEqual(0, len(images)) def test_image_get_all_with_filter_multiple_user_defined_properties(self): images = self.db_api.image_get_all(self.context, filters={'foo': 'bar', 'far': 'boo'}) self.assertEqual(1, len(images)) self.assertEqual(images[0]['id'], self.fixtures[0]['id']) def test_image_get_all_with_filter_nonexistent_user_defined_property(self): images = self.db_api.image_get_all(self.context, filters={'foo': 'bar', 'faz': 'boo'}) self.assertEqual(0, len(images)) def test_image_get_all_with_filter_user_deleted_property(self): fixture = {'name': 'poo', 'value': 'bear', 'image_id': UUID1} prop = self.db_api.image_property_create(self.context, fixture) images = self.db_api.image_get_all(self.context, filters={ 'properties': {'poo': 'bear'}, }) self.assertEqual(1, len(images)) self.db_api.image_property_delete(self.context, prop['name'], images[0]['id']) images = self.db_api.image_get_all(self.context, filters={ 'properties': {'poo': 'bear'}, }) self.assertEqual(0, len(images)) def test_image_get_all_with_filter_undefined_property(self): images = self.db_api.image_get_all(self.context, filters={'poo': 'bear'}) self.assertEqual(0, len(images)) def test_image_get_all_with_filter_protected(self): images = self.db_api.image_get_all(self.context, filters={'protected': True}) self.assertEqual(1, len(images)) images = self.db_api.image_get_all(self.context, filters={'protected': False}) self.assertEqual(2, len(images)) def test_image_get_all_with_filter_comparative_created_at(self): anchor = timeutils.isotime(self.fixtures[0]['created_at']) time_expr = 'lt:' + anchor images = self.db_api.image_get_all(self.context, filters={'created_at': time_expr}) self.assertEqual(0, len(images)) def test_image_get_all_with_filter_comparative_updated_at(self): anchor = timeutils.isotime(self.fixtures[0]['updated_at']) time_expr = 'lt:' + anchor images = self.db_api.image_get_all(self.context, filters={'updated_at': time_expr}) self.assertEqual(0, len(images)) def test_filter_image_by_invalid_operator(self): self.assertRaises(exception.InvalidFilterOperatorValue, self.db_api.image_get_all, self.context, filters={'status': 'lala:active'}) def test_image_get_all_with_filter_in_status(self): images = self.db_api.image_get_all(self.context, filters={'status': 'in:active'}) self.assertEqual(3, len(images)) def test_image_get_all_with_filter_in_name(self): data = 'in:%s' % self.fixtures[0]['name'] images = self.db_api.image_get_all(self.context, filters={'name': data}) self.assertEqual(3, len(images)) def test_image_get_all_with_filter_in_container_format(self): images = self.db_api.image_get_all(self.context, filters={'container_format': 'in:ami,bare,ovf'}) self.assertEqual(3, len(images)) def test_image_get_all_with_filter_in_disk_format(self): 
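        # 'in:' filters match when the attribute equals any of the
        # comma-separated values; the call shape (taken from the
        # container_format test above) is:
        #
        #     filters={'container_format': 'in:ami,bare,ovf'}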
        images = self.db_api.image_get_all(self.context,
                                           filters={'disk_format': 'in:vhd'})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_filter_in_id(self):
        data = 'in:%s,%s' % (UUID1, UUID2)
        images = self.db_api.image_get_all(self.context,
                                           filters={'id': data})
        self.assertEqual(2, len(images))

    def test_image_get_all_with_quotes(self):
        fixture = {'name': 'fake\\\"name'}
        self.db_api.image_update(self.adm_context, UUID3, fixture)
        fixture = {'name': 'fake,name'}
        self.db_api.image_update(self.adm_context, UUID2, fixture)
        fixture = {'name': 'fakename'}
        self.db_api.image_update(self.adm_context, UUID1, fixture)
        data = 'in:\"fake\\\"name\",fakename,\"fake,name\"'
        images = self.db_api.image_get_all(self.context,
                                           filters={'name': data})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_invalid_quotes(self):
        invalid_expr = ['in:\"name', 'in:\"name\"name',
                        'in:name\"dd\"', 'in:na\"me',
                        'in:\"name\"\"name\"']
        for expr in invalid_expr:
            self.assertRaises(exception.InvalidParameterValue,
                              self.db_api.image_get_all,
                              self.context, filters={'name': expr})

    def test_image_get_all_size_min_max(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'size_min': 10,
                                               'size_max': 15,
                                           })
        self.assertEqual(1, len(images))
        self.assertEqual(self.fixtures[0]['id'], images[0]['id'])

    def test_image_get_all_size_min(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'size_min': 15})
        self.assertEqual(2, len(images))
        self.assertEqual(self.fixtures[2]['id'], images[0]['id'])
        self.assertEqual(self.fixtures[1]['id'], images[1]['id'])

    def test_image_get_all_size_range(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'size_max': 15,
                                                    'size_min': 20})
        self.assertEqual(0, len(images))

    def test_image_get_all_size_max(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'size_max': 15})
        self.assertEqual(1, len(images))
        self.assertEqual(self.fixtures[0]['id'], images[0]['id'])

    def test_image_get_all_with_filter_min_range_bad_value(self):
        self.assertRaises(exception.InvalidFilterRangeValue,
                          self.db_api.image_get_all,
                          self.context, filters={'size_min': 'blah'})

    def test_image_get_all_with_filter_max_range_bad_value(self):
        self.assertRaises(exception.InvalidFilterRangeValue,
                          self.db_api.image_get_all,
                          self.context, filters={'size_max': 'blah'})

    def test_image_get_all_marker(self):
        images = self.db_api.image_get_all(self.context, marker=UUID3)
        self.assertEqual(2, len(images))

    def test_image_get_all_marker_with_size(self):
        # Use sort_key=size to test BigInteger
        images = self.db_api.image_get_all(self.context,
                                           sort_key=['size'],
                                           marker=UUID3)
        self.assertEqual(2, len(images))
        self.assertEqual(17, images[0]['size'])
        self.assertEqual(13, images[1]['size'])

    def test_image_get_all_marker_deleted(self):
        """Cannot specify a deleted image as a marker."""
        self.db_api.image_destroy(self.adm_context, UUID1)
        filters = {'deleted': False}
        self.assertRaises(exception.NotFound, self.db_api.image_get_all,
                          self.context, marker=UUID1, filters=filters)

    def test_image_get_all_marker_deleted_showing_deleted_as_admin(self):
        """Specify a deleted image as a marker if showing deleted images."""
        self.db_api.image_destroy(self.adm_context, UUID3)
        images = self.db_api.image_get_all(self.adm_context, marker=UUID3)
        # NOTE(bcwaldon): an admin should see all images (deleted or not)
        self.assertEqual(2, len(images))

    def test_image_get_all_marker_deleted_showing_deleted(self):
        """Specify a deleted image as a marker if showing deleted images.
        A non-admin user has to explicitly ask for deleted
        images, and should only see deleted images in the results
        """
        self.db_api.image_destroy(self.adm_context, UUID3)
        self.db_api.image_destroy(self.adm_context, UUID1)
        filters = {'deleted': True}
        images = self.db_api.image_get_all(self.context, marker=UUID3,
                                           filters=filters)
        self.assertEqual(1, len(images))

    def test_image_get_all_marker_null_name_desc(self):
        """Check an image with name null is handled

        Check an image with name null is handled when marker
        is specified and order is descending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'name': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['name'],
                                           sort_dir=['desc'])
        image_ids = [image['id'] for image in images]
        expected = []
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_disk_format_desc(self):
        """Check an image with disk_format null is handled

        Check an image with disk_format null is handled when
        marker is specified and order is descending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'disk_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['disk_format'],
                                           sort_dir=['desc'])
        image_ids = [image['id'] for image in images]
        expected = []
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_container_format_desc(self):
        """Check an image with container_format null is handled

        Check an image with container_format null is handled when
        marker is specified and order is descending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'container_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['container_format'],
                                           sort_dir=['desc'])
        image_ids = [image['id'] for image in images]
        expected = []
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_name_asc(self):
        """Check an image with name null is handled

        Check an image with name null is handled when marker is specified
        and order is ascending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'name': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['name'],
                                           sort_dir=['asc'])
        image_ids = [image['id'] for image in images]
        expected = [UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_disk_format_asc(self):
        """Check an image with disk_format null is handled

        Check an image with disk_format null is handled when
        marker is specified and order is ascending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'disk_format': None,
                                         'owner': TENANT1})
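        # With a NULL disk_format as the marker and an ascending sort, the
        # test expects all three fixture images to come after the NULL row
        # (i.e. NULLs are assumed to sort first in this driver).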
        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['disk_format'],
                                           sort_dir=['asc'])
        image_ids = [image['id'] for image in images]
        expected = [UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_container_format_asc(self):
        """Check an image with container_format null is handled

        Check an image with container_format null is handled when
        marker is specified and order is ascending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'container_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['container_format'],
                                           sort_dir=['asc'])
        image_ids = [image['id'] for image in images]
        expected = [UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_limit(self):
        images = self.db_api.image_get_all(self.context, limit=2)
        self.assertEqual(2, len(images))

        # A limit of None should not equate to zero
        images = self.db_api.image_get_all(self.context, limit=None)
        self.assertEqual(3, len(images))

        # A limit of zero should actually mean zero
        images = self.db_api.image_get_all(self.context, limit=0)
        self.assertEqual(0, len(images))

    def test_image_get_all_owned(self):
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        image_meta_data = {'id': UUIDX, 'status': 'queued', 'owner': TENANT1}
        self.db_api.image_create(ctxt1, image_meta_data)

        TENANT2 = str(uuid.uuid4())
        ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)
        UUIDY = str(uuid.uuid4())
        image_meta_data = {'id': UUIDY, 'status': 'queued', 'owner': TENANT2}
        self.db_api.image_create(ctxt2, image_meta_data)

        images = self.db_api.image_get_all(ctxt1)
        image_ids = [image['id'] for image in images]
        expected = [UUIDX, UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_owned_checksum(self):
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        CHECKSUM1 = '91264c3edf5972c9f1cb309543d38a5c'
        image_meta_data = {
            'id': UUIDX,
            'status': 'queued',
            'checksum': CHECKSUM1,
            'owner': TENANT1
        }
        self.db_api.image_create(ctxt1, image_meta_data)
        image_member_data = {
            'image_id': UUIDX,
            'member': TENANT1,
            'can_share': False,
            "status": "accepted",
        }
        self.db_api.image_member_create(ctxt1, image_member_data)

        TENANT2 = str(uuid.uuid4())
        ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)
        UUIDY = str(uuid.uuid4())
        CHECKSUM2 = '92264c3edf5972c9f1cb309543d38a5c'
        image_meta_data = {
            'id': UUIDY,
            'status': 'queued',
            'checksum': CHECKSUM2,
            'owner': TENANT2
        }
        self.db_api.image_create(ctxt2, image_meta_data)
        image_member_data = {
            'image_id': UUIDY,
            'member': TENANT2,
            'can_share': False,
            "status": "accepted",
        }
        self.db_api.image_member_create(ctxt2, image_member_data)

        filters = {'visibility': 'shared', 'checksum': CHECKSUM2}
        images = self.db_api.image_get_all(ctxt2, filters)
        self.assertEqual(1, len(images))
        self.assertEqual(UUIDY, images[0]['id'])

    def test_image_get_all_with_filter_tags(self):
        self.db_api.image_tag_create(self.context, UUID1, 'x86')
        self.db_api.image_tag_create(self.context, UUID1, '64bit')
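        # UUID2 shares the '64bit' tag with UUID1, so the single-tag
        # filter below is expected to match both images.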
        self.db_api.image_tag_create(self.context, UUID2, 'power')
        self.db_api.image_tag_create(self.context, UUID2, '64bit')

        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['64bit']})
        self.assertEqual(2, len(images))
        image_ids = [image['id'] for image in images]
        expected = [UUID1, UUID2]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_with_filter_multi_tags(self):
        self.db_api.image_tag_create(self.context, UUID1, 'x86')
        self.db_api.image_tag_create(self.context, UUID1, '64bit')
        self.db_api.image_tag_create(self.context, UUID2, 'power')
        self.db_api.image_tag_create(self.context, UUID2, '64bit')

        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['64bit',
                                                             'power']})
        self.assertEqual(1, len(images))
        self.assertEqual(UUID2, images[0]['id'])

    def test_image_get_all_with_filter_tags_and_nonexistent(self):
        self.db_api.image_tag_create(self.context, UUID1, 'x86')

        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['x86',
                                                             'fake']})
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_deleted_tags(self):
        tag = self.db_api.image_tag_create(self.context, UUID1, 'AIX')
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'tags': [tag],
                                           })
        self.assertEqual(1, len(images))

        self.db_api.image_tag_delete(self.context, UUID1, tag)
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'tags': [tag],
                                           })
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_undefined_tags(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['fake']})
        self.assertEqual(0, len(images))

    def test_image_paginate(self):
        """Paginate through a list of images using limit and marker"""
        now = timeutils.utcnow()
        extra_uuids = [(str(uuid.uuid4()),
                        now + datetime.timedelta(seconds=i * 5))
                       for i in range(2)]
        extra_images = [build_image_fixture(id=_id,
                                            created_at=_dt,
                                            updated_at=_dt)
                        for _id, _dt in extra_uuids]
        self.create_images(extra_images)

        # Reverse uuids to match default sort of created_at
        extra_uuids.reverse()

        page = self.db_api.image_get_all(self.context, limit=2)
        self.assertEqual([i[0] for i in extra_uuids],
                         [i['id'] for i in page])
        last = page[-1]['id']

        page = self.db_api.image_get_all(self.context, limit=2, marker=last)
        self.assertEqual([UUID3, UUID2],
                         [i['id'] for i in page])

        page = self.db_api.image_get_all(self.context, limit=2, marker=UUID2)
        self.assertEqual([UUID1],
                         [i['id'] for i in page])

    def test_image_get_all_invalid_sort_key(self):
        self.assertRaises(exception.InvalidSortKey,
                          self.db_api.image_get_all,
                          self.context, sort_key=['blah'])

    def test_image_get_all_limit_marker(self):
        images = self.db_api.image_get_all(self.context, limit=2)
        self.assertEqual(2, len(images))

    def test_image_get_all_with_tag_returning(self):
        expected_tags = {UUID1: ['foo'], UUID2: ['bar'], UUID3: ['baz']}
        self.db_api.image_tag_create(self.context, UUID1,
                                     expected_tags[UUID1][0])
        self.db_api.image_tag_create(self.context, UUID2,
                                     expected_tags[UUID2][0])
        self.db_api.image_tag_create(self.context, UUID3,
                                     expected_tags[UUID3][0])

        images = self.db_api.image_get_all(self.context, return_tag=True)
        self.assertEqual(3, len(images))
        for image in images:
            self.assertIn('tags', image)
            self.assertEqual(expected_tags[image['id']], image['tags'])

        self.db_api.image_tag_delete(self.context, UUID1,
                                     expected_tags[UUID1][0])
        expected_tags[UUID1] = []

        images = self.db_api.image_get_all(self.context, return_tag=True)
        self.assertEqual(3, len(images))
        for image in images:
            self.assertIn('tags', image)
            self.assertEqual(expected_tags[image['id']], image['tags'])
    def test_image_destroy(self):
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': 'b', 'metadata': {},
                          'status': 'active'}]
        fixture = {'status': 'queued', 'locations': location_data}
        image = self.db_api.image_create(self.context, fixture)
        IMG_ID = image['id']

        fixture = {'name': 'ping', 'value': 'pong', 'image_id': IMG_ID}
        prop = self.db_api.image_property_create(self.context, fixture)
        TENANT2 = str(uuid.uuid4())
        fixture = {'image_id': IMG_ID, 'member': TENANT2, 'can_share': False}
        member = self.db_api.image_member_create(self.context, fixture)
        self.db_api.image_tag_create(self.context, IMG_ID, 'snarf')

        self.assertEqual(2, len(image['locations']))
        self.assertIn('id', image['locations'][0])
        self.assertIn('id', image['locations'][1])
        image['locations'][0].pop('id')
        image['locations'][1].pop('id')
        self.assertEqual(location_data, image['locations'])
        self.assertEqual(('ping', 'pong', IMG_ID, False),
                         (prop['name'], prop['value'],
                          prop['image_id'], prop['deleted']))
        self.assertEqual((TENANT2, IMG_ID, False),
                         (member['member'], member['image_id'],
                          member['can_share']))
        self.assertEqual(['snarf'], self.db_api.image_tag_get_all(
            self.context, IMG_ID))

        image = self.db_api.image_destroy(self.adm_context, IMG_ID)
        self.assertTrue(image['deleted'])
        self.assertTrue(image['deleted_at'])
        self.assertRaises(exception.NotFound, self.db_api.image_get,
                          self.context, IMG_ID)

        self.assertEqual([], image['locations'])
        prop = image['properties'][0]
        self.assertEqual(('ping', IMG_ID, True),
                         (prop['name'], prop['image_id'], prop['deleted']))

        self.context.auth_token = 'user:%s:user' % TENANT2
        members = self.db_api.image_member_find(self.context, IMG_ID)
        self.assertEqual([], members)
        tags = self.db_api.image_tag_get_all(self.context, IMG_ID)
        self.assertEqual([], tags)

    def test_image_destroy_with_delete_all(self):
        """Check the image child element's _image_delete_all methods.

        Checks that the image_delete_all methods delete only the child
        elements of the image being deleted.
        """
""" TENANT2 = str(uuid.uuid4()) location_data = [{'url': 'a', 'metadata': {'key': 'value'}, 'status': 'active'}, {'url': 'b', 'metadata': {}, 'status': 'active'}] def _create_image_with_child_entries(): fixture = {'status': 'queued', 'locations': location_data} image_id = self.db_api.image_create(self.context, fixture)['id'] fixture = {'name': 'ping', 'value': 'pong', 'image_id': image_id} self.db_api.image_property_create(self.context, fixture) fixture = {'image_id': image_id, 'member': TENANT2, 'can_share': False} self.db_api.image_member_create(self.context, fixture) self.db_api.image_tag_create(self.context, image_id, 'snarf') return image_id ACTIVE_IMG_ID = _create_image_with_child_entries() DEL_IMG_ID = _create_image_with_child_entries() deleted_image = self.db_api.image_destroy(self.adm_context, DEL_IMG_ID) self.assertTrue(deleted_image['deleted']) self.assertTrue(deleted_image['deleted_at']) self.assertRaises(exception.NotFound, self.db_api.image_get, self.context, DEL_IMG_ID) active_image = self.db_api.image_get(self.context, ACTIVE_IMG_ID) self.assertFalse(active_image['deleted']) self.assertFalse(active_image['deleted_at']) self.assertEqual(2, len(active_image['locations'])) self.assertIn('id', active_image['locations'][0]) self.assertIn('id', active_image['locations'][1]) active_image['locations'][0].pop('id') active_image['locations'][1].pop('id') self.assertEqual(location_data, active_image['locations']) self.assertEqual(1, len(active_image['properties'])) prop = active_image['properties'][0] self.assertEqual(('ping', 'pong', ACTIVE_IMG_ID), (prop['name'], prop['value'], prop['image_id'])) self.assertEqual((False, None), (prop['deleted'], prop['deleted_at'])) self.context.auth_token = 'user:%s:user' % TENANT2 members = self.db_api.image_member_find(self.context, ACTIVE_IMG_ID) self.assertEqual(1, len(members)) member = members[0] self.assertEqual((TENANT2, ACTIVE_IMG_ID, False), (member['member'], member['image_id'], member['can_share'])) tags = self.db_api.image_tag_get_all(self.context, ACTIVE_IMG_ID) self.assertEqual(['snarf'], tags) def test_image_get_multiple_members(self): TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1) ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2, auth_token='user:%s:user' % TENANT2) UUIDX = str(uuid.uuid4()) # We need a shared image and context.owner should not match image # owner self.db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) values = {'image_id': UUIDX, 'member': TENANT2, 'can_share': False} self.db_api.image_member_create(ctxt1, values) image = self.db_api.image_get(ctxt2, UUIDX) self.assertEqual(UUIDX, image['id']) # by default get_all displays only images with status 'accepted' images = self.db_api.image_get_all(ctxt2) self.assertEqual(3, len(images)) # filter by rejected images = self.db_api.image_get_all(ctxt2, member_status='rejected') self.assertEqual(3, len(images)) # filter by visibility images = self.db_api.image_get_all(ctxt2, filters={'visibility': 'shared'}) self.assertEqual(0, len(images)) # filter by visibility images = self.db_api.image_get_all(ctxt2, member_status='pending', filters={'visibility': 'shared'}) self.assertEqual(1, len(images)) # filter by visibility images = self.db_api.image_get_all(ctxt2, member_status='all', filters={'visibility': 'shared'}) self.assertEqual(1, len(images)) # filter by status pending images = 
        images = self.db_api.image_get_all(ctxt2, member_status='pending')
        self.assertEqual(4, len(images))

        # filter by status all
        images = self.db_api.image_get_all(ctxt2, member_status='all')
        self.assertEqual(4, len(images))

    def test_is_image_visible(self):
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)
        UUIDX = str(uuid.uuid4())
        # We need a shared image and context.owner should not match image
        # owner
        image = self.db_api.image_create(ctxt1, {'id': UUIDX,
                                                 'status': 'queued',
                                                 'is_public': False,
                                                 'owner': TENANT1})

        values = {'image_id': UUIDX, 'member': TENANT2, 'can_share': False}
        self.db_api.image_member_create(ctxt1, values)

        result = self.db_api.is_image_visible(ctxt2, image)
        self.assertTrue(result)

        # image should not be visible for a deleted member
        members = self.db_api.image_member_find(ctxt1, image_id=UUIDX)
        self.db_api.image_member_delete(ctxt1, members[0]['id'])

        result = self.db_api.is_image_visible(ctxt2, image)
        self.assertFalse(result)

    def test_is_community_image_visible(self):
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        owners_ctxt = context.RequestContext(is_admin=False, tenant=TENANT1,
                                             auth_token='user:%s:user'
                                             % TENANT1)
        viewing_ctxt = context.RequestContext(is_admin=False, user=TENANT2,
                                              auth_token='user:%s:user'
                                              % TENANT2)
        UUIDX = str(uuid.uuid4())
        # We need a community image and context.owner should not match image
        # owner
        image = self.db_api.image_create(owners_ctxt,
                                         {'id': UUIDX,
                                          'status': 'queued',
                                          'visibility': 'community',
                                          'owner': TENANT1})

        # image should be visible in every context
        result = self.db_api.is_image_visible(owners_ctxt, image)
        self.assertTrue(result)

        result = self.db_api.is_image_visible(viewing_ctxt, image)
        self.assertTrue(result)

    def test_image_tag_create(self):
        tag = self.db_api.image_tag_create(self.context, UUID1, 'snap')
        self.assertEqual('snap', tag)

    def test_image_tag_create_bad_value(self):
        self.assertRaises(exception.Invalid,
                          self.db_api.image_tag_create,
                          self.context, UUID1, 'Bad \U0001f62a')

    def test_image_tag_set_all(self):
        tags = self.db_api.image_tag_get_all(self.context, UUID1)
        self.assertEqual([], tags)

        self.db_api.image_tag_set_all(self.context, UUID1, ['ping', 'pong'])

        tags = self.db_api.image_tag_get_all(self.context, UUID1)
        # NOTE(bcwaldon): tag ordering should match exactly what was provided
        self.assertEqual(['ping', 'pong'], tags)

    def test_image_tag_get_all(self):
        self.db_api.image_tag_create(self.context, UUID1, 'snap')
        self.db_api.image_tag_create(self.context, UUID1, 'snarf')
        self.db_api.image_tag_create(self.context, UUID2, 'snarf')

        # Check the tags for the first image
        tags = self.db_api.image_tag_get_all(self.context, UUID1)
        expected = ['snap', 'snarf']
        self.assertEqual(expected, tags)

        # Check the tags for the second image
        tags = self.db_api.image_tag_get_all(self.context, UUID2)
        expected = ['snarf']
        self.assertEqual(expected, tags)

    def test_image_tag_get_all_no_tags(self):
        actual = self.db_api.image_tag_get_all(self.context, UUID1)
        self.assertEqual([], actual)

    def test_image_tag_get_all_non_existent_image(self):
        bad_image_id = str(uuid.uuid4())
        actual = self.db_api.image_tag_get_all(self.context, bad_image_id)
        self.assertEqual([], actual)

    def test_image_tag_delete(self):
        self.db_api.image_tag_create(self.context, UUID1, 'snap')
        self.db_api.image_tag_delete(self.context, UUID1, 'snap')
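        # Deleting the same tag a second time should raise NotFound.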
        self.assertRaises(exception.NotFound,
                          self.db_api.image_tag_delete,
                          self.context, UUID1, 'snap')

    @mock.patch.object(timeutils, 'utcnow')
    def test_image_member_create(self, mock_utcnow):
        mock_utcnow.return_value = datetime.datetime.utcnow()
        memberships = self.db_api.image_member_find(self.context)
        self.assertEqual([], memberships)

        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        self.db_api.image_member_create(self.context,
                                        {'member': TENANT1,
                                         'image_id': UUID1})

        memberships = self.db_api.image_member_find(self.context)
        self.assertEqual(1, len(memberships))
        actual = memberships[0]
        self.assertIsNotNone(actual['created_at'])
        self.assertIsNotNone(actual['updated_at'])
        actual.pop('id')
        actual.pop('created_at')
        actual.pop('updated_at')
        expected = {
            'member': TENANT1,
            'image_id': UUID1,
            'can_share': False,
            'status': 'pending',
            'deleted': False,
        }
        self.assertEqual(expected, actual)

    def test_image_member_update(self):
        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        member = self.db_api.image_member_create(self.context,
                                                 {'member': TENANT1,
                                                  'image_id': UUID1})
        member_id = member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'pending',
                    'can_share': False,
                    'deleted': False}
        self.assertEqual(expected, member)

        self.delay_inaccurate_clock()
        member = self.db_api.image_member_update(self.context,
                                                 member_id,
                                                 {'can_share': True})

        self.assertNotEqual(member['created_at'], member['updated_at'])
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'pending',
                    'can_share': True,
                    'deleted': False}
        self.assertEqual(expected, member)

        members = self.db_api.image_member_find(self.context,
                                                member=TENANT1,
                                                image_id=UUID1)
        member = members[0]
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        self.assertEqual(expected, member)

    def test_image_member_update_status(self):
        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        member = self.db_api.image_member_create(self.context,
                                                 {'member': TENANT1,
                                                  'image_id': UUID1})
        member_id = member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'pending',
                    'can_share': False,
                    'deleted': False}
        self.assertEqual(expected, member)

        self.delay_inaccurate_clock()
        member = self.db_api.image_member_update(self.context,
                                                 member_id,
                                                 {'status': 'accepted'})

        self.assertNotEqual(member['created_at'], member['updated_at'])
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'accepted',
                    'can_share': False,
                    'deleted': False}
        self.assertEqual(expected, member)

        members = self.db_api.image_member_find(self.context,
                                                member=TENANT1,
                                                image_id=UUID1)
        member = members[0]
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        self.assertEqual(expected, member)

    def test_image_member_find(self):
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        fixtures = [
            {'member': TENANT1, 'image_id': UUID1},
            {'member': TENANT1, 'image_id': UUID2, 'status': 'rejected'},
            {'member': TENANT2, 'image_id': UUID1, 'status': 'accepted'},
        ]
        for f in fixtures:
            self.db_api.image_member_create(self.context, copy.deepcopy(f))

        def _assertMemberListMatch(list1, list2):
            def _simple(x):
                return set([(o['member'], o['image_id']) for o in x])
            self.assertEqual(_simple(list1), _simple(list2))

        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        output = self.db_api.image_member_find(self.context,
                                               member=TENANT1)
        _assertMemberListMatch([fixtures[0], fixtures[1]], output)

        output = self.db_api.image_member_find(self.adm_context,
                                               image_id=UUID1)
        _assertMemberListMatch([fixtures[0], fixtures[2]], output)

        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT2
        output = self.db_api.image_member_find(self.context,
                                               member=TENANT2,
                                               image_id=UUID1)
        _assertMemberListMatch([fixtures[2]], output)

        output = self.db_api.image_member_find(self.context,
                                               status='accepted')
        _assertMemberListMatch([fixtures[2]], output)

        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        output = self.db_api.image_member_find(self.context,
                                               status='rejected')
        _assertMemberListMatch([fixtures[1]], output)

        output = self.db_api.image_member_find(self.context,
                                               status='pending')
        _assertMemberListMatch([fixtures[0]], output)

        output = self.db_api.image_member_find(self.context,
                                               status='pending',
                                               image_id=UUID2)
        _assertMemberListMatch([], output)

        image_id = str(uuid.uuid4())
        output = self.db_api.image_member_find(self.context,
                                               member=TENANT2,
                                               image_id=image_id)
        _assertMemberListMatch([], output)

    def test_image_member_count(self):
        TENANT1 = str(uuid.uuid4())
        self.db_api.image_member_create(self.context,
                                        {'member': TENANT1,
                                         'image_id': UUID1})

        actual = self.db_api.image_member_count(self.context, UUID1)

        self.assertEqual(1, actual)

    def test_image_member_count_invalid_image_id(self):
        TENANT1 = str(uuid.uuid4())
        self.db_api.image_member_create(self.context,
                                        {'member': TENANT1,
                                         'image_id': UUID1})

        self.assertRaises(exception.Invalid, self.db_api.image_member_count,
                          self.context, None)

    def test_image_member_count_empty_image_id(self):
        TENANT1 = str(uuid.uuid4())
        self.db_api.image_member_create(self.context,
                                        {'member': TENANT1,
                                         'image_id': UUID1})

        self.assertRaises(exception.Invalid, self.db_api.image_member_count,
                          self.context, "")

    def test_image_member_delete(self):
        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        fixture = {'member': TENANT1, 'image_id': UUID1, 'can_share': True}
        member = self.db_api.image_member_create(self.context, fixture)
        self.assertEqual(1, len(self.db_api.image_member_find(self.context)))
        member = self.db_api.image_member_delete(self.context, member['id'])
        self.assertEqual(0, len(self.db_api.image_member_find(self.context)))


class DriverQuotaTests(test_utils.BaseTestCase):

    def setUp(self):
        super(DriverQuotaTests, self).setUp()
        self.owner_id1 = str(uuid.uuid4())
        self.context1 = context.RequestContext(
            is_admin=False, user=self.owner_id1, tenant=self.owner_id1,
            auth_token='%s:%s:user' % (self.owner_id1, self.owner_id1))
        self.db_api = db_tests.get_db(self.config)
        db_tests.reset_db(self.db_api)
        dt1 = timeutils.utcnow()
        dt2 = dt1 + datetime.timedelta(microseconds=5)
        fixtures = [
            {
                'id': UUID1,
                'created_at': dt1,
                'updated_at': dt1,
                'size': 13,
                'owner': self.owner_id1,
            },
            {
                'id': UUID2,
                'created_at': dt1,
                'updated_at': dt2,
                'size': 17,
                'owner': self.owner_id1,
            },
            {
                'id': UUID3,
                'created_at': dt2,
                'updated_at': dt2,
                'size': 7,
                'owner': self.owner_id1,
            },
        ]
        self.owner1_fixtures = [
            build_image_fixture(**fixture) for fixture in fixtures]

        for fixture in self.owner1_fixtures:
            self.db_api.image_create(self.context1, fixture)

    def test_storage_quota(self):
        total = functools.reduce(
            lambda x, y: x + y,
            [f['size'] for f in self.owner1_fixtures],
        )
        x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1)
        self.assertEqual(total, x)

    def test_storage_quota_without_image_id(self):
        total = functools.reduce(
            lambda x, y: x + y,
            [f['size'] for f in self.owner1_fixtures],
        )
        total = total - self.owner1_fixtures[0]['size']
        x = self.db_api.user_get_storage_usage(
            self.context1, self.owner_id1,
            image_id=self.owner1_fixtures[0]['id'])
        self.assertEqual(total, x)

    def test_storage_quota_multiple_locations(self):
        dt1 = timeutils.utcnow()
        sz = 53
        new_fixture_dict = {'id': str(uuid.uuid4()), 'created_at': dt1,
                            'updated_at': dt1, 'size': sz,
                            'owner': self.owner_id1}
        new_fixture = build_image_fixture(**new_fixture_dict)
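        # Storage usage is tallied per location, so adding a second
        # location below is expected to count the image size twice
        # (hence the 'sz * 2' in the total asserted afterwards).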
        new_fixture['locations'].append({'url': 'file:///some/path/file',
                                         'metadata': {},
                                         'status': 'active'})
        self.db_api.image_create(self.context1, new_fixture)

        total = functools.reduce(
            lambda x, y: x + y,
            [f['size'] for f in self.owner1_fixtures],
        ) + (sz * 2)
        x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1)
        self.assertEqual(total, x)

    def test_storage_quota_deleted_image(self):
        # NOTE(flaper87): This needs to be tested for
        # soft deleted images as well. Currently there's no
        # good way to delete locations.
        dt1 = timeutils.utcnow()
        sz = 53
        image_id = str(uuid.uuid4())
        new_fixture_dict = {'id': image_id, 'created_at': dt1,
                            'updated_at': dt1, 'size': sz,
                            'owner': self.owner_id1}
        new_fixture = build_image_fixture(**new_fixture_dict)
        new_fixture['locations'].append({'url': 'file:///some/path/file',
                                         'metadata': {},
                                         'status': 'active'})
        self.db_api.image_create(self.context1, new_fixture)

        total = functools.reduce(
            lambda x, y: x + y,
            [f['size'] for f in self.owner1_fixtures],
        )
        x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1)
        self.assertEqual(total + (sz * 2), x)

        self.db_api.image_destroy(self.context1, image_id)
        x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1)
        self.assertEqual(total, x)


class TaskTests(test_utils.BaseTestCase):

    def setUp(self):
        super(TaskTests, self).setUp()
        self.admin_id = 'admin'
        self.owner_id = 'user'
        self.adm_context = context.RequestContext(
            is_admin=True, auth_token='user:admin:admin',
            tenant=self.admin_id)
        self.context = context.RequestContext(
            is_admin=False, auth_token='user:user:user',
            user=self.owner_id)
        self.db_api = db_tests.get_db(self.config)
        self.fixtures = self.build_task_fixtures()
        db_tests.reset_db(self.db_api)

    def build_task_fixtures(self):
        self.context.project_id = str(uuid.uuid4())
        fixtures = [
            {
                'owner': self.context.owner,
                'type': 'import',
                'input': {'import_from': 'file:///a.img',
                          'import_from_format': 'qcow2',
                          'image_properties': {
                              "name": "GreatStack 1.22",
                              "tags": ["lamp", "custom"]
                          }},
            },
            {
                'owner': self.context.owner,
                'type': 'import',
                'input': {'import_from': 'file:///b.img',
                          'import_from_format': 'qcow2',
                          'image_properties': {
                              "name": "GreatStack 1.23",
                              "tags": ["lamp", "good"]
                          }},
            },
            {
                'owner': self.context.owner,
                "type": "export",
                "input": {
                    "export_uuid": "deadbeef-dead-dead-dead-beefbeefbeef",
                    "export_to":
                        "swift://cloud.foo/myaccount/mycontainer/path",
                    "export_format": "qcow2"
                }
            },
        ]
        return [build_task_fixture(**fixture) for fixture in fixtures]

    def test_task_get_all_with_filter(self):
        for fixture in self.fixtures:
            self.db_api.task_create(self.adm_context,
                                    build_task_fixture(**fixture))

        import_tasks = self.db_api.task_get_all(self.adm_context,
                                                filters={'type': 'import'})
        self.assertTrue(import_tasks)
        self.assertEqual(2, len(import_tasks))

        for task in import_tasks:
            self.assertEqual('import', task['type'])
            self.assertEqual(self.context.owner, task['owner'])

    def test_task_get_all_as_admin(self):
        tasks = []
        for fixture in self.fixtures:
            task = self.db_api.task_create(self.adm_context,
                                           build_task_fixture(**fixture))
            tasks.append(task)
        import_tasks = self.db_api.task_get_all(self.adm_context)
        self.assertTrue(import_tasks)
        self.assertEqual(3, len(import_tasks))

    def test_task_get_all_marker(self):
        for fixture in self.fixtures:
            self.db_api.task_create(self.adm_context,
                                    build_task_fixture(**fixture))
        tasks = self.db_api.task_get_all(self.adm_context, sort_key='id')
        task_ids = [t['id'] for t in tasks]
        tasks = self.db_api.task_get_all(self.adm_context, sort_key='id',
                                         marker=task_ids[0])
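        # With the first id as the marker, only the two tasks sorting
        # after it should be returned.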
        self.assertEqual(2, len(tasks))

    def test_task_get_all_limit(self):
        for fixture in self.fixtures:
            self.db_api.task_create(self.adm_context,
                                    build_task_fixture(**fixture))

        tasks = self.db_api.task_get_all(self.adm_context, limit=2)
        self.assertEqual(2, len(tasks))

        # A limit of None should not equate to zero
        tasks = self.db_api.task_get_all(self.adm_context, limit=None)
        self.assertEqual(3, len(tasks))

        # A limit of zero should actually mean zero
        tasks = self.db_api.task_get_all(self.adm_context, limit=0)
        self.assertEqual(0, len(tasks))

    def test_task_get_all_owned(self):
        then = timeutils.utcnow() + datetime.timedelta(days=365)
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False,
                                       tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        task_values = {'type': 'import', 'status': 'pending',
                       'input': '{"loc": "fake"}', 'owner': TENANT1,
                       'expires_at': then}
        self.db_api.task_create(ctxt1, task_values)

        TENANT2 = str(uuid.uuid4())
        ctxt2 = context.RequestContext(is_admin=False,
                                       tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)

        task_values = {'type': 'export', 'status': 'pending',
                       'input': '{"loc": "fake"}', 'owner': TENANT2,
                       'expires_at': then}
        self.db_api.task_create(ctxt2, task_values)

        tasks = self.db_api.task_get_all(ctxt1)

        task_owners = set([task['owner'] for task in tasks])
        expected = set([TENANT1])
        self.assertEqual(sorted(expected), sorted(task_owners))

    def test_task_get(self):
        expires_at = timeutils.utcnow()
        image_id = str(uuid.uuid4())
        fixture = {
            'owner': self.context.owner,
            'type': 'import',
            'status': 'pending',
            'input': '{"loc": "fake"}',
            'result': "{'image_id': %s}" % image_id,
            'message': 'blah',
            'expires_at': expires_at
        }

        task = self.db_api.task_create(self.adm_context, fixture)

        self.assertIsNotNone(task)
        self.assertIsNotNone(task['id'])

        task_id = task['id']
        task = self.db_api.task_get(self.adm_context, task_id)

        self.assertIsNotNone(task)
        self.assertEqual(task_id, task['id'])
        self.assertEqual(self.context.owner, task['owner'])
        self.assertEqual('import', task['type'])
        self.assertEqual('pending', task['status'])
        self.assertEqual(fixture['input'], task['input'])
        self.assertEqual(fixture['result'], task['result'])
        self.assertEqual(fixture['message'], task['message'])
        self.assertEqual(expires_at, task['expires_at'])

    def _test_task_get_by_image(self, expired=False, deleted=False,
                                other_owner=False):
        expires_at = timeutils.utcnow()
        if expired is False:
            expires_at += datetime.timedelta(hours=1)
        elif expired is None:
            # This is the case where we haven't even processed the task
            # to give it an expiry time.
            expires_at = None
        image_id = str(uuid.uuid4())
        fixture = {
            'owner': 'notme!' if other_owner else self.context.owner,
            'type': 'import',
            'status': 'pending',
            'input': '{"loc": "fake"}',
            'result': "{'image_id': %s}" % image_id,
            'message': 'blah',
            'expires_at': expires_at,
            'image_id': image_id,
            'user_id': 'me',
            'request_id': 'reqid',
        }

        new_task = self.db_api.task_create(self.adm_context, fixture)
        if deleted:
            self.db_api.task_delete(self.context, new_task['id'])

        return (new_task['id'],
                self.db_api.tasks_get_by_image(self.context, image_id))

    def test_task_get_by_image_not_expired(self):
        # Make sure we get back the task
        task_id, tasks = self._test_task_get_by_image(expired=False)
        self.assertEqual(1, len(tasks))
        self.assertEqual(task_id, tasks[0]['id'])

    def test_task_get_by_image_expired(self):
        # Make sure we do not retrieve the expired task
        task_id, tasks = self._test_task_get_by_image(expired=True)
        self.assertEqual(0, len(tasks))

        # We should have deleted the task while querying for it, so make
        # sure that our task is now marked as deleted.
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(1, len(tasks))
        self.assertEqual(task_id, tasks[0]['id'])
        self.assertTrue(tasks[0]['deleted'])

    def test_task_get_by_image_no_expiry(self):
        # Make sure we find the task that has expires_at=NULL
        task_id, tasks = self._test_task_get_by_image(expired=None)
        self.assertEqual(1, len(tasks))

        # The task should have been retrieved above, and it's also not
        # deleted because it doesn't have an expiry, so it should
        # still be in the DB.
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(1, len(tasks))
        self.assertEqual(task_id, tasks[0]['id'])
        self.assertFalse(tasks[0]['deleted'])
        self.assertIsNone(tasks[0]['expires_at'])

    def test_task_get_by_image_deleted(self):
        task_id, tasks = self._test_task_get_by_image(deleted=True)
        # We cannot see the deleted tasks
        self.assertEqual(0, len(tasks))

    def test_task_get_by_image_not_mine(self):
        task_id, tasks = self._test_task_get_by_image(other_owner=True)
        # We cannot see tasks we do not own
        self.assertEqual(0, len(tasks))

    def test_task_get_all(self):
        now = timeutils.utcnow()
        then = now + datetime.timedelta(days=365)
        image_id = str(uuid.uuid4())
        fixture1 = {
            'owner': self.context.owner,
            'type': 'import',
            'status': 'pending',
            'input': '{"loc": "fake_1"}',
            'result': "{'image_id': %s}" % image_id,
            'message': 'blah_1',
            'expires_at': then,
            'created_at': now,
            'updated_at': now
        }

        fixture2 = {
            'owner': self.context.owner,
            'type': 'import',
            'status': 'pending',
            'input': '{"loc": "fake_2"}',
            'result': "{'image_id': %s}" % image_id,
            'message': 'blah_2',
            'expires_at': then,
            'created_at': now,
            'updated_at': now
        }

        task1 = self.db_api.task_create(self.adm_context, fixture1)
        task2 = self.db_api.task_create(self.adm_context, fixture2)

        self.assertIsNotNone(task1)
        self.assertIsNotNone(task2)

        task1_id = task1['id']
        task2_id = task2['id']
        task_fixtures = {task1_id: fixture1, task2_id: fixture2}
        tasks = self.db_api.task_get_all(self.adm_context)

        self.assertEqual(2, len(tasks))
        self.assertEqual(set((tasks[0]['id'], tasks[1]['id'])),
                         set((task1_id, task2_id)))

        for task in tasks:
            fixture = task_fixtures[task['id']]

            self.assertEqual(self.context.owner, task['owner'])
            self.assertEqual(fixture['type'], task['type'])
            self.assertEqual(fixture['status'], task['status'])
            self.assertEqual(fixture['expires_at'], task['expires_at'])
            self.assertFalse(task['deleted'])
            self.assertIsNone(task['deleted_at'])
            self.assertEqual(fixture['created_at'], task['created_at'])
            self.assertEqual(fixture['updated_at'], task['updated_at'])
            task_details_keys = ['input', 'message', 'result']
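            # task_get_all returns task summaries only; the detail fields
            # live in the task_info table and are expected to be absent
            # from list results.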
            for key in task_details_keys:
                self.assertNotIn(key, task)

    def test_task_soft_delete(self):
        now = timeutils.utcnow()
        then = now + datetime.timedelta(days=365)
        fixture1 = build_task_fixture(id='1', expires_at=now,
                                      owner=self.adm_context.owner)
        fixture2 = build_task_fixture(id='2', expires_at=now,
                                      owner=self.adm_context.owner)
        fixture3 = build_task_fixture(id='3', expires_at=then,
                                      owner=self.adm_context.owner)
        fixture4 = build_task_fixture(id='4', expires_at=then,
                                      owner=self.adm_context.owner)

        task1 = self.db_api.task_create(self.adm_context, fixture1)
        task2 = self.db_api.task_create(self.adm_context, fixture2)
        task3 = self.db_api.task_create(self.adm_context, fixture3)
        task4 = self.db_api.task_create(self.adm_context, fixture4)

        self.assertIsNotNone(task1)
        self.assertIsNotNone(task2)
        self.assertIsNotNone(task3)
        self.assertIsNotNone(task4)

        tasks = self.db_api.task_get_all(
            self.adm_context, sort_key='id', sort_dir='asc')
        self.assertEqual(4, len(tasks))
        self.assertTrue(tasks[0]['deleted'])
        self.assertTrue(tasks[1]['deleted'])
        self.assertFalse(tasks[2]['deleted'])
        self.assertFalse(tasks[3]['deleted'])

    def test_task_create(self):
        task_id = str(uuid.uuid4())
        self.context.project_id = self.context.owner
        values = {
            'id': task_id,
            'owner': self.context.owner,
            'type': 'export',
            'status': 'pending',
        }

        task_values = build_task_fixture(**values)
        task = self.db_api.task_create(self.adm_context, task_values)

        self.assertIsNotNone(task)
        self.assertEqual(task_id, task['id'])
        self.assertEqual(self.context.owner, task['owner'])
        self.assertEqual('export', task['type'])
        self.assertEqual('pending', task['status'])
        self.assertEqual({'ping': 'pong'}, task['input'])

    def test_task_create_with_all_task_info_null(self):
        task_id = str(uuid.uuid4())
        self.context.project_id = str(uuid.uuid4())
        values = {
            'id': task_id,
            'owner': self.context.owner,
            'type': 'export',
            'status': 'pending',
            'input': None,
            'result': None,
            'message': None,
        }

        task_values = build_task_fixture(**values)
        task = self.db_api.task_create(self.adm_context, task_values)

        self.assertIsNotNone(task)
        self.assertEqual(task_id, task['id'])
        self.assertEqual(self.context.owner, task['owner'])
        self.assertEqual('export', task['type'])
        self.assertEqual('pending', task['status'])
        self.assertIsNone(task['input'])
        self.assertIsNone(task['result'])
        self.assertIsNone(task['message'])

    def test_task_update(self):
        self.context.project_id = str(uuid.uuid4())
        result = {'foo': 'bar'}
        task_values = build_task_fixture(owner=self.context.owner,
                                         result=result)
        task = self.db_api.task_create(self.adm_context, task_values)

        task_id = task['id']
        fixture = {
            'status': 'processing',
            'message': 'This is an error string',
        }
        self.delay_inaccurate_clock()
        task = self.db_api.task_update(self.adm_context, task_id, fixture)

        self.assertEqual(task_id, task['id'])
        self.assertEqual(self.context.owner, task['owner'])
        self.assertEqual('import', task['type'])
        self.assertEqual('processing', task['status'])
        self.assertEqual({'ping': 'pong'}, task['input'])
        self.assertEqual(result, task['result'])
        self.assertEqual('This is an error string', task['message'])
        self.assertFalse(task['deleted'])
        self.assertIsNone(task['deleted_at'])
        self.assertIsNone(task['expires_at'])
        self.assertEqual(task_values['created_at'], task['created_at'])
        self.assertGreater(task['updated_at'], task['created_at'])

    def test_task_update_with_all_task_info_null(self):
        self.context.project_id = str(uuid.uuid4())
        task_values = build_task_fixture(owner=self.context.owner,
                                         input=None,
                                         result=None,
                                         message=None)
        task = self.db_api.task_create(self.adm_context, task_values)

        task_id = task['id']
        fixture = {'status': 'processing'}
        self.delay_inaccurate_clock()
        task = self.db_api.task_update(self.adm_context, task_id, fixture)

        self.assertEqual(task_id, task['id'])
        self.assertEqual(self.context.owner, task['owner'])
        self.assertEqual('import', task['type'])
        self.assertEqual('processing', task['status'])
        self.assertIsNone(task['input'])
        self.assertIsNone(task['result'])
        self.assertIsNone(task['message'])
        self.assertFalse(task['deleted'])
        self.assertIsNone(task['deleted_at'])
        self.assertIsNone(task['expires_at'])
        self.assertEqual(task_values['created_at'], task['created_at'])
        self.assertGreater(task['updated_at'], task['created_at'])

    def test_task_delete(self):
        task_values = build_task_fixture(owner=self.context.owner)
        task = self.db_api.task_create(self.adm_context, task_values)

        self.assertIsNotNone(task)
        self.assertFalse(task['deleted'])
        self.assertIsNone(task['deleted_at'])

        task_id = task['id']
        self.db_api.task_delete(self.adm_context, task_id)

        self.assertRaises(exception.TaskNotFound, self.db_api.task_get,
                          self.context, task_id)

    def test_task_delete_as_admin(self):
        task_values = build_task_fixture(owner=self.context.owner)
        task = self.db_api.task_create(self.adm_context, task_values)

        self.assertIsNotNone(task)
        self.assertFalse(task['deleted'])
        self.assertIsNone(task['deleted_at'])

        task_id = task['id']
        self.db_api.task_delete(self.adm_context, task_id)
        del_task = self.db_api.task_get(self.adm_context,
                                        task_id,
                                        force_show_deleted=True)
        self.assertIsNotNone(del_task)
        self.assertEqual(task_id, del_task['id'])
        self.assertTrue(del_task['deleted'])
        self.assertIsNotNone(del_task['deleted_at'])


class DBPurgeTests(test_utils.BaseTestCase):

    def setUp(self):
        super(DBPurgeTests, self).setUp()
        self.adm_context = context.get_admin_context(show_deleted=True)
        self.db_api = db_tests.get_db(self.config)
        db_tests.reset_db(self.db_api)
        self.context = context.RequestContext(is_admin=True)
        self.image_fixtures, self.task_fixtures = self.build_fixtures()
        self.create_tasks(self.task_fixtures)
        self.create_images(self.image_fixtures)

    def build_fixtures(self):
        dt1 = timeutils.utcnow() - datetime.timedelta(days=5)
        dt2 = dt1 + datetime.timedelta(days=1)
        dt3 = dt2 + datetime.timedelta(days=1)
        fixtures = [
            {
                'created_at': dt1,
                'updated_at': dt1,
                'deleted_at': dt3,
                'deleted': True,
            },
            {
                'created_at': dt1,
                'updated_at': dt2,
                'deleted_at': timeutils.utcnow(),
                'deleted': True,
            },
            {
                'created_at': dt2,
                'updated_at': dt2,
                'deleted_at': None,
                'deleted': False,
            },
        ]
        return (
            [build_image_fixture(**fixture) for fixture in fixtures],
            [build_task_fixture(**fixture) for fixture in fixtures],
        )

    def create_images(self, images):
        for fixture in images:
            self.db_api.image_create(self.adm_context, fixture)

    def create_tasks(self, tasks):
        for fixture in tasks:
            self.db_api.task_create(self.adm_context, fixture)

    def test_db_purge(self):
        self.db_api.purge_deleted_rows(self.adm_context, 1, 5)
        images = self.db_api.image_get_all(self.adm_context)
        # Verify that no records from images have been deleted
        # as images table will be purged using 'purge_images_table'
        # command.
        self.assertEqual(len(images), 3)
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(len(tasks), 2)

    def test_db_purge_images_table(self):
        images = self.db_api.image_get_all(self.adm_context)
        self.assertEqual(len(images), 3)
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(len(tasks), 3)
        # purge records from locations table
        for image in images:
            session = self.db_api.get_session()
            with session.begin():
                session.execute(
                    sql.delete(models.ImageLocation)
                    .where(models.ImageLocation.image_id == image['id'])
                )
        # purge records from images_tags table
        self.db_api.purge_deleted_rows(self.adm_context, 1, 5)
        # purge records from images table
        self.db_api.purge_deleted_rows_from_images(self.adm_context, 1, 5)
        images = self.db_api.image_get_all(self.adm_context)
        self.assertEqual(len(images), 2)
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(len(tasks), 2)

    def test_purge_images_table_fk_constraint_failure(self):
        """Test foreign key constraint failure

        Test whether a foreign key constraint failure during a purge
        operation raises DBReferenceError.
        """
        session = db_api.get_session()
        engine = db_api.get_engine()
        connection = engine.connect()
        images = sqlalchemyutils.get_table(engine, "images")
        image_tags = sqlalchemyutils.get_table(engine, "image_tags")

        # Add a 4th row in images table and set it deleted 15 days ago
        uuidstr = uuid.uuid4().hex
        created_time = timeutils.utcnow() - datetime.timedelta(days=20)
        deleted_time = created_time + datetime.timedelta(days=5)
        images_row_fixture = {
            'id': uuidstr,
            'status': 'status',
            'created_at': created_time,
            'deleted_at': deleted_time,
            'deleted': 1,
            'visibility': 'public',
            'min_disk': 1,
            'min_ram': 1,
            'protected': 0
        }
        ins_stmt = images.insert().values(**images_row_fixture)
        with connection.begin():
            connection.execute(ins_stmt)

        # Add a record in image_tags referencing the above images record
        # but do not set it as deleted
        image_tags_row_fixture = {
            'image_id': uuidstr,
            'value': 'tag_value',
            'created_at': created_time,
            'deleted': 0
        }
        ins_stmt = image_tags.insert().values(**image_tags_row_fixture)
        with connection.begin():
            connection.execute(ins_stmt)

        # Purge all records deleted at least 10 days ago
        self.assertRaises(db_exception.DBReferenceError,
                          db_api.purge_deleted_rows_from_images,
                          self.adm_context,
                          age_in_days=10,
                          max_rows=50)

        # Verify that no records from images have been deleted
        # due to DBReferenceError being raised
        with session.begin():
            images_rows = session.query(images).count()
        self.assertEqual(4, images_rows)

    def test_purge_task_info_with_refs_to_soft_deleted_tasks(self):
        session = db_api.get_session()
        engine = db_api.get_engine()

        # check initial task and task_info row number are 3
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(3, len(tasks))
        task_info = sqlalchemyutils.get_table(engine, 'task_info')
        with session.begin():
            task_info_rows = session.query(task_info).count()
        self.assertEqual(3, task_info_rows)

        # purge soft deleted rows older than yesterday
        self.db_api.purge_deleted_rows(self.context, 1, 5)

        # check 1 row of task table is purged
        tasks = self.db_api.task_get_all(self.adm_context)
        self.assertEqual(2, len(tasks))

        # and no task_info was left behind, 1 row purged
        with session.begin():
            task_info_rows = session.query(task_info).count()
        self.assertEqual(2, task_info_rows)


class TestVisibility(test_utils.BaseTestCase):

    def setUp(self):
        super(TestVisibility, self).setUp()
        self.db_api = db_tests.get_db(self.config)
        db_tests.reset_db(self.db_api)
        self.setup_tenants()
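        # The fixture matrix created via build_image_fixtures() below is
        # four owners (unowned, admin tenant, tenant 1, tenant 2) times
        # four visibilities (community, private, public, shared), i.e.
        # 16 images in total; the counts asserted in the visibility tests
        # all derive from that matrix.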
        self.setup_contexts()
        self.fixtures = self.build_image_fixtures()
        self.create_images(self.fixtures)

    def setup_tenants(self):
        self.admin_tenant = str(uuid.uuid4())
        self.tenant1 = str(uuid.uuid4())
        self.tenant2 = str(uuid.uuid4())

    def setup_contexts(self):
        self.admin_context = context.RequestContext(
            is_admin=True, tenant=self.admin_tenant)
        self.admin_none_context = context.RequestContext(
            is_admin=True, tenant=None)
        self.tenant1_context = context.RequestContext(tenant=self.tenant1)
        self.tenant2_context = context.RequestContext(tenant=self.tenant2)
        self.none_context = context.RequestContext(tenant=None)

    def build_image_fixtures(self):
        fixtures = []
        owners = {
            'Unowned': None,
            'Admin Tenant': self.admin_tenant,
            'Tenant 1': self.tenant1,
            'Tenant 2': self.tenant2,
        }
        visibilities = ['community', 'private', 'public', 'shared']
        for owner_label, owner in owners.items():
            for visibility in visibilities:
                fixture = {
                    'name': '%s, %s' % (owner_label, visibility),
                    'owner': owner,
                    'visibility': visibility,
                }
                fixtures.append(fixture)
        return [build_image_fixture(**f) for f in fixtures]

    def create_images(self, images):
        for fixture in images:
            self.db_api.image_create(self.admin_context, fixture)


class VisibilityTests(object):

    def test_unknown_admin_sees_all_but_community(self):
        images = self.db_api.image_get_all(self.admin_none_context)
        self.assertEqual(12, len(images))

    def test_unknown_admin_is_public_true(self):
        images = self.db_api.image_get_all(self.admin_none_context,
                                           is_public=True)
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_unknown_admin_is_public_false(self):
        images = self.db_api.image_get_all(self.admin_none_context,
                                           is_public=False)
        self.assertEqual(8, len(images))
        for i in images:
            self.assertIn(i['visibility'], ['shared', 'private'])

    def test_unknown_admin_is_public_none(self):
        images = self.db_api.image_get_all(self.admin_none_context)
        self.assertEqual(12, len(images))

    def test_unknown_admin_visibility_public(self):
        images = self.db_api.image_get_all(self.admin_none_context,
                                           filters={'visibility': 'public'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_unknown_admin_visibility_shared(self):
        images = self.db_api.image_get_all(self.admin_none_context,
                                           filters={'visibility': 'shared'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('shared', i['visibility'])

    def test_unknown_admin_visibility_private(self):
        images = self.db_api.image_get_all(self.admin_none_context,
                                           filters={'visibility': 'private'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('private', i['visibility'])

    def test_unknown_admin_visibility_community(self):
        images = self.db_api.image_get_all(
            self.admin_none_context, filters={'visibility': 'community'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('community', i['visibility'])

    def test_unknown_admin_visibility_all(self):
        images = self.db_api.image_get_all(self.admin_none_context,
                                           filters={'visibility': 'all'})
        self.assertEqual(16, len(images))

    def test_known_admin_sees_all_but_others_community_images(self):
        images = self.db_api.image_get_all(self.admin_context)
        self.assertEqual(13, len(images))

    def test_known_admin_is_public_true(self):
        images = self.db_api.image_get_all(self.admin_context,
                                           is_public=True)
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_known_admin_is_public_false(self):
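        # A known admin also sees their own community image, so the
        # non-public count here is 9 rather than the 8 an unknown
        # (tenant-less) admin gets.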
        images = self.db_api.image_get_all(self.admin_context,
                                           is_public=False)
        self.assertEqual(9, len(images))
        for i in images:
            self.assertIn(i['visibility'], ['shared', 'private',
                                            'community'])

    def test_known_admin_is_public_none(self):
        images = self.db_api.image_get_all(self.admin_context)
        self.assertEqual(13, len(images))

    def test_admin_as_user_true(self):
        images = self.db_api.image_get_all(self.admin_context,
                                           admin_as_user=True)
        self.assertEqual(7, len(images))
        for i in images:
            self.assertTrue(('public' == i['visibility'])
                            or i['owner'] == self.admin_tenant)

    def test_known_admin_visibility_public(self):
        images = self.db_api.image_get_all(self.admin_context,
                                           filters={'visibility': 'public'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_known_admin_visibility_shared(self):
        images = self.db_api.image_get_all(self.admin_context,
                                           filters={'visibility': 'shared'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('shared', i['visibility'])

    def test_known_admin_visibility_private(self):
        images = self.db_api.image_get_all(self.admin_context,
                                           filters={'visibility': 'private'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('private', i['visibility'])

    def test_known_admin_visibility_community(self):
        images = self.db_api.image_get_all(
            self.admin_context, filters={'visibility': 'community'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('community', i['visibility'])

    def test_known_admin_visibility_all(self):
        images = self.db_api.image_get_all(self.admin_context,
                                           filters={'visibility': 'all'})
        self.assertEqual(16, len(images))

    def test_what_unknown_user_sees(self):
        images = self.db_api.image_get_all(self.none_context)
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_unknown_user_is_public_true(self):
        images = self.db_api.image_get_all(self.none_context, is_public=True)
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_unknown_user_is_public_false(self):
        images = self.db_api.image_get_all(self.none_context,
                                           is_public=False)
        self.assertEqual(0, len(images))

    def test_unknown_user_is_public_none(self):
        images = self.db_api.image_get_all(self.none_context)
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_unknown_user_visibility_public(self):
        images = self.db_api.image_get_all(self.none_context,
                                           filters={'visibility': 'public'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('public', i['visibility'])

    def test_unknown_user_visibility_shared(self):
        images = self.db_api.image_get_all(self.none_context,
                                           filters={'visibility': 'shared'})
        self.assertEqual(0, len(images))

    def test_unknown_user_visibility_private(self):
        images = self.db_api.image_get_all(self.none_context,
                                           filters={'visibility': 'private'})
        self.assertEqual(0, len(images))

    def test_unknown_user_visibility_community(self):
        images = self.db_api.image_get_all(
            self.none_context, filters={'visibility': 'community'})
        self.assertEqual(4, len(images))
        for i in images:
            self.assertEqual('community', i['visibility'])

    def test_unknown_user_visibility_all(self):
        images = self.db_api.image_get_all(self.none_context,
                                           filters={'visibility': 'all'})
        self.assertEqual(8, len(images))

    def test_what_tenant1_sees(self):
        images = self.db_api.image_get_all(self.tenant1_context)
        self.assertEqual(7, len(images))
        for i in images:
            if not ('public' == i['visibility']):
                self.assertEqual(i['owner'], self.tenant1)
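    # The tenant-scoped assertions below follow from the fixture matrix:
    # a tenant sees the four public images plus its own private, shared,
    # and community images (4 + 3 = 7).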
test_tenant1_is_public_true(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True) self.assertEqual(4, len(images)) for i in images: self.assertEqual('public', i['visibility']) def test_tenant1_is_public_false(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False) self.assertEqual(3, len(images)) for i in images: self.assertEqual(i['owner'], self.tenant1) self.assertIn(i['visibility'], ['private', 'shared', 'community']) def test_tenant1_is_public_none(self): images = self.db_api.image_get_all(self.tenant1_context) self.assertEqual(7, len(images)) for i in images: if not ('public' == i['visibility']): self.assertEqual(self.tenant1, i['owner']) def test_tenant1_visibility_public(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) for i in images: self.assertEqual('public', i['visibility']) def test_tenant1_visibility_shared(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'shared'}) self.assertEqual(1, len(images)) self.assertEqual('shared', images[0]['visibility']) self.assertEqual(self.tenant1, images[0]['owner']) def test_tenant1_visibility_private(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'private'}) self.assertEqual(1, len(images)) self.assertEqual('private', images[0]['visibility']) self.assertEqual(self.tenant1, images[0]['owner']) def test_tenant1_visibility_community(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'community'}) self.assertEqual(4, len(images)) for i in images: self.assertEqual('community', i['visibility']) def test_tenant1_visibility_all(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'all'}) self.assertEqual(10, len(images)) def _setup_is_public_red_herring(self): values = { 'name': 'Red Herring', 'owner': self.tenant1, 'visibility': 'shared', 'properties': {'is_public': 'silly'} } fixture = build_image_fixture(**values) self.db_api.image_create(self.admin_context, fixture) def test_is_public_is_a_normal_filter_for_admin(self): self._setup_is_public_red_herring() images = self.db_api.image_get_all(self.admin_context, filters={'is_public': 'silly'}) self.assertEqual(1, len(images)) self.assertEqual('Red Herring', images[0]['name']) def test_is_public_is_a_normal_filter_for_user(self): self._setup_is_public_red_herring() images = self.db_api.image_get_all(self.tenant1_context, filters={'is_public': 'silly'}) self.assertEqual(1, len(images)) self.assertEqual('Red Herring', images[0]['name']) # NOTE(markwash): the following tests are sanity checks to make sure # visibility filtering and is_public=(True|False) do not interact in # unexpected ways. However, using both of the filtering techniques # simultaneously is not an anticipated use case. 
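# NOTE: the interaction reduces to a simple rule, sketched here for
# reference (this sketch is not part of the test suite): is_public=True
# intersects the context's view with 'public' images only,
# is_public=False with everything else, and the 'visibility' filter is
# then applied on top. For the 16-image grid built by these fixtures:
#
#     def expected(is_public, visibility, visible_count):
#         # visible_count: images of that visibility the context can see
#         return visible_count if (visibility == 'public') == is_public else 0
#
#     expected(False, 'shared', 4)   # admin, shared          -> 4
#     expected(True, 'shared', 4)    # admin, shared + public -> 0
#     expected(False, 'shared', 1)   # tenant1, shared        -> 1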
def test_admin_is_public_true_and_visibility_public(self): images = self.db_api.image_get_all(self.admin_context, is_public=True, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) def test_admin_is_public_false_and_visibility_public(self): images = self.db_api.image_get_all(self.admin_context, is_public=False, filters={'visibility': 'public'}) self.assertEqual(0, len(images)) def test_admin_is_public_true_and_visibility_shared(self): images = self.db_api.image_get_all(self.admin_context, is_public=True, filters={'visibility': 'shared'}) self.assertEqual(0, len(images)) def test_admin_is_public_false_and_visibility_shared(self): images = self.db_api.image_get_all(self.admin_context, is_public=False, filters={'visibility': 'shared'}) self.assertEqual(4, len(images)) def test_admin_is_public_true_and_visibility_private(self): images = self.db_api.image_get_all(self.admin_context, is_public=True, filters={'visibility': 'private'}) self.assertEqual(0, len(images)) def test_admin_is_public_false_and_visibility_private(self): images = self.db_api.image_get_all(self.admin_context, is_public=False, filters={'visibility': 'private'}) self.assertEqual(4, len(images)) def test_admin_is_public_true_and_visibility_community(self): images = self.db_api.image_get_all(self.admin_context, is_public=True, filters={'visibility': 'community'}) self.assertEqual(0, len(images)) def test_admin_is_public_false_and_visibility_community(self): images = self.db_api.image_get_all(self.admin_context, is_public=False, filters={'visibility': 'community'}) self.assertEqual(4, len(images)) def test_tenant1_is_public_true_and_visibility_public(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) def test_tenant1_is_public_false_and_visibility_public(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False, filters={'visibility': 'public'}) self.assertEqual(0, len(images)) def test_tenant1_is_public_true_and_visibility_shared(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True, filters={'visibility': 'shared'}) self.assertEqual(0, len(images)) def test_tenant1_is_public_false_and_visibility_shared(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False, filters={'visibility': 'shared'}) self.assertEqual(1, len(images)) def test_tenant1_is_public_true_and_visibility_private(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True, filters={'visibility': 'private'}) self.assertEqual(0, len(images)) def test_tenant1_is_public_false_and_visibility_private(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False, filters={'visibility': 'private'}) self.assertEqual(1, len(images)) def test_tenant1_is_public_true_and_visibility_community(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True, filters={'visibility': 'community'}) self.assertEqual(0, len(images)) def test_tenant1_is_public_false_and_visibility_community(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False, filters={'visibility': 'community'}) self.assertEqual(4, len(images)) class TestMembershipVisibility(test_utils.BaseTestCase): def setUp(self): super(TestMembershipVisibility, self).setUp() self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self._create_contexts() self._create_images() def _create_contexts(self): self.owner1, self.owner1_ctx = self._user_fixture() 
self.owner2, self.owner2_ctx = self._user_fixture() self.tenant1, self.user1_ctx = self._user_fixture() self.tenant2, self.user2_ctx = self._user_fixture() self.tenant3, self.user3_ctx = self._user_fixture() self.admin_tenant, self.admin_ctx = self._user_fixture(admin=True) def _user_fixture(self, admin=False): tenant_id = str(uuid.uuid4()) ctx = context.RequestContext(tenant=tenant_id, is_admin=admin) return tenant_id, ctx def _create_images(self): self.image_ids = {} for owner in [self.owner1, self.owner2]: self._create_image('not_shared', owner) self._create_image('shared-with-1', owner, members=[self.tenant1]) self._create_image('shared-with-2', owner, members=[self.tenant2]) self._create_image('shared-with-both', owner, members=[self.tenant1, self.tenant2]) def _create_image(self, name, owner, members=None): image = build_image_fixture(name=name, owner=owner, visibility='shared') self.image_ids[(owner, name)] = image['id'] self.db_api.image_create(self.admin_ctx, image) for member in members or []: member = {'image_id': image['id'], 'member': member} self.db_api.image_member_create(self.admin_ctx, member) class MembershipVisibilityTests(object): def _check_by_member(self, ctx, member_id, expected): members = self.db_api.image_member_find(ctx, member=member_id) images = [self.db_api.image_get(self.admin_ctx, member['image_id']) for member in members] facets = [(image['owner'], image['name']) for image in images] self.assertEqual(set(expected), set(facets)) def test_owner1_finding_user1_memberships(self): """Owner1 should see images it owns that are shared with User1.""" expected = [ (self.owner1, 'shared-with-1'), (self.owner1, 'shared-with-both'), ] self._check_by_member(self.owner1_ctx, self.tenant1, expected) def test_user1_finding_user1_memberships(self): """User1 should see all images shared with User1 """ expected = [ (self.owner1, 'shared-with-1'), (self.owner1, 'shared-with-both'), (self.owner2, 'shared-with-1'), (self.owner2, 'shared-with-both'), ] self._check_by_member(self.user1_ctx, self.tenant1, expected) def test_user2_finding_user1_memberships(self): """User2 should see no images shared with User1 """ expected = [] self._check_by_member(self.user2_ctx, self.tenant1, expected) def test_admin_finding_user1_memberships(self): """Admin should see all images shared with User1 """ expected = [ (self.owner1, 'shared-with-1'), (self.owner1, 'shared-with-both'), (self.owner2, 'shared-with-1'), (self.owner2, 'shared-with-both'), ] self._check_by_member(self.admin_ctx, self.tenant1, expected) def _check_by_image(self, context, image_id, expected): members = self.db_api.image_member_find(context, image_id=image_id) member_ids = [member['member'] for member in members] self.assertEqual(set(expected), set(member_ids)) def test_owner1_finding_owner1s_image_members(self): """Owner1 should see all memberships of its image """ expected = [self.tenant1, self.tenant2] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.owner1_ctx, image_id, expected) def test_admin_finding_owner1s_image_members(self): """Admin should see all memberships of owner1's image """ expected = [self.tenant1, self.tenant2] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.admin_ctx, image_id, expected) def test_user1_finding_owner1s_image_members(self): """User1 should see its own membership of owner1's image """ expected = [self.tenant1] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.user1_ctx, image_id, 
expected) def test_user2_finding_owner1s_image_members(self): """User2 should see its own membership of owner1's image """ expected = [self.tenant2] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.user2_ctx, image_id, expected) def test_user3_finding_owner1s_image_members(self): """User3 should see no memberships of owner1's image """ expected = [] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.user3_ctx, image_id, expected)
glance-29.0.0/glance/tests/functional/db/base_metadef.py
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import os.path from glance.common import config from glance.common import exception from glance import context from glance.db.sqlalchemy import metadata import glance.tests.functional.db as db_tests from glance.tests import utils as test_utils # root of repo ROOT_DIR = os.path.join( os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir, os.pardir, os.pardir, ) METADEFS_DIR = os.path.join(ROOT_DIR, 'etc', 'metadefs') def build_namespace_fixture(**kwargs): namespace = { 'namespace': 'MyTestNamespace', 'display_name': 'test-display-name', 'description': 'test-description', 'visibility': 'public', 'protected': 0, 'owner': 'test-owner' } namespace.update(kwargs) return namespace def build_resource_type_fixture(**kwargs): resource_type = { 'name': 'MyTestResourceType', 'protected': 0 } resource_type.update(kwargs) return resource_type def build_association_fixture(**kwargs): association = { 'name': 'MyTestResourceType', 'properties_target': 'test-properties-target', 'prefix': 'test-prefix' } association.update(kwargs) return association def build_object_fixture(**kwargs): # Full testing of required and schema done via rest api tests object = { 'namespace_id': 1, 'name': 'test-object-name', 'description': 'test-object-description', 'required': 'fake-required-properties-list', 'json_schema': '{fake-schema}' } object.update(kwargs) return object def build_property_fixture(**kwargs): # Full testing of required and schema done via rest api tests property = { 'namespace_id': 1, 'name': 'test-property-name', 'json_schema': '{fake-schema}' } property.update(kwargs) return property def build_tag_fixture(**kwargs): # Full testing of required and schema done via rest api tests tag = { 'namespace_id': 1, 'name': 'test-tag-name', } tag.update(kwargs) return tag def build_tags_fixture(tag_name_list): tag_list = [] for tag_name in tag_name_list: tag_list.append({'name': tag_name}) return tag_list class TestMetadefDriver(test_utils.BaseTestCase): """Test Driver class for Metadef tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super(TestMetadefDriver, self).setUp() config.parse_args(args=[])
self.config(metadata_source_path=METADEFS_DIR) context_cls = context.RequestContext self.adm_context = context_cls(is_admin=True, auth_token='user:user:admin') self.context = context_cls(is_admin=False, auth_token='user:user:user') self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) def _assert_saved_fields(self, expected, actual): for k in expected.keys(): self.assertEqual(expected[k], actual[k]) class MetadefNamespaceTests(object): def test_namespace_create(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created) self._assert_saved_fields(fixture, created) def test_namespace_create_duplicate(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created) self._assert_saved_fields(fixture, created) self.assertRaises(exception.Duplicate, self.db_api.metadef_namespace_create, self.context, fixture) def test_namespace_get(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created) self._assert_saved_fields(fixture, created) found = self.db_api.metadef_namespace_get( self.context, created['namespace']) self.assertIsNotNone(found, "Namespace not found.") def test_namespace_get_all_with_resource_types_filter(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture = build_association_fixture() created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], fixture) self.assertIsNotNone(created, "Could not create an association.") rt_filters = {'resource_types': fixture['name']} found = self.db_api.metadef_namespace_get_all( self.context, filters=rt_filters, sort_key='created_at') self.assertEqual(1, len(found)) for item in found: self._assert_saved_fields(ns_fixture, item) def test_namespace_update(self): delta = {'owner': 'New Owner'} fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created['namespace']) self.assertEqual(fixture['namespace'], created['namespace']) delta_dict = copy.deepcopy(created) delta_dict.update(delta.copy()) updated = self.db_api.metadef_namespace_update( self.context, created['id'], delta_dict) self.assertEqual(delta['owner'], updated['owner']) def test_namespace_delete(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created, "Could not create a Namespace.") self.db_api.metadef_namespace_delete( self.context, created['namespace']) self.assertRaises(exception.NotFound, self.db_api.metadef_namespace_get, self.context, created['namespace']) def test_namespace_delete_with_content(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self._assert_saved_fields(fixture_ns, created_ns) # Create object content for the namespace fixture_obj = build_object_fixture() created_obj = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_obj) self.assertIsNotNone(created_obj) # Create property content for the namespace fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( 
self.context, created_ns['namespace'], fixture_prop) self.assertIsNotNone(created_prop) # Create associations fixture_assn = build_association_fixture() created_assn = self.db_api.metadef_resource_type_association_create( self.context, created_ns['namespace'], fixture_assn) self.assertIsNotNone(created_assn) deleted_ns = self.db_api.metadef_namespace_delete( self.context, created_ns['namespace']) self.assertRaises(exception.NotFound, self.db_api.metadef_namespace_get, self.context, deleted_ns['namespace']) class MetadefPropertyTests(object): def test_property_create(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) self._assert_saved_fields(fixture_prop, created_prop) def test_property_create_duplicate(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) self._assert_saved_fields(fixture_prop, created_prop) self.assertRaises(exception.Duplicate, self.db_api.metadef_property_create, self.context, created_ns['namespace'], fixture_prop) def test_property_get(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture_ns, created_ns) fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) found_prop = self.db_api.metadef_property_get( self.context, created_ns['namespace'], created_prop['name']) self._assert_saved_fields(fixture_prop, found_prop) def test_property_get_all(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture1 = build_property_fixture(namespace_id=ns_created['id']) created_p1 = self.db_api.metadef_property_create( self.context, ns_created['namespace'], fixture1) self.assertIsNotNone(created_p1, "Could not create a property.") fixture2 = build_property_fixture(namespace_id=ns_created['id'], name='test-prop-2') created_p2 = self.db_api.metadef_property_create( self.context, ns_created['namespace'], fixture2) self.assertIsNotNone(created_p2, "Could not create a property.") found = self.db_api.metadef_property_get_all( self.context, ns_created['namespace']) self.assertEqual(2, len(found)) def test_property_update(self): delta = {'name': 'New-name', 'json_schema': 'new-schema'} fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) prop_fixture = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], prop_fixture) self.assertIsNotNone(created_prop, "Could not create a property.") delta_dict = copy.deepcopy(created_prop) 
delta_dict.update(delta.copy()) updated = self.db_api.metadef_property_update( self.context, created_ns['namespace'], created_prop['id'], delta_dict) self.assertEqual(delta['name'], updated['name']) self.assertEqual(delta['json_schema'], updated['json_schema']) def test_property_delete(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) prop_fixture = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], prop_fixture) self.assertIsNotNone(created_prop, "Could not create a property.") self.db_api.metadef_property_delete( self.context, created_ns['namespace'], created_prop['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_property_get, self.context, created_ns['namespace'], created_prop['name']) def test_property_delete_namespace_content(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) prop_fixture = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], prop_fixture) self.assertIsNotNone(created_prop, "Could not create a property.") self.db_api.metadef_property_delete_namespace_content( self.context, created_ns['namespace']) self.assertRaises(exception.NotFound, self.db_api.metadef_property_get, self.context, created_ns['namespace'], created_prop['name']) class MetadefObjectTests(object): def test_object_create(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_object = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_object) self._assert_saved_fields(fixture_object, created_object) def test_object_create_duplicate(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_object = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_object) self._assert_saved_fields(fixture_object, created_object) self.assertRaises(exception.Duplicate, self.db_api.metadef_object_create, self.context, created_ns['namespace'], fixture_object) def test_object_get(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture_ns, created_ns) fixture_object = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_object) found_object = self.db_api.metadef_object_get( self.context, created_ns['namespace'], created_object['name']) self._assert_saved_fields(fixture_object, found_object) def test_object_get_all(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create(self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture1 = 
build_object_fixture(namespace_id=ns_created['id']) created_o1 = self.db_api.metadef_object_create( self.context, ns_created['namespace'], fixture1) self.assertIsNotNone(created_o1, "Could not create an object.") fixture2 = build_object_fixture(namespace_id=ns_created['id'], name='test-object-2') created_o2 = self.db_api.metadef_object_create( self.context, ns_created['namespace'], fixture2) self.assertIsNotNone(created_o2, "Could not create an object.") found = self.db_api.metadef_object_get_all( self.context, ns_created['namespace']) self.assertEqual(2, len(found)) def test_object_update(self): delta = {'name': 'New-name', 'json_schema': 'new-schema', 'required': 'new-required'} fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) object_fixture = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], object_fixture) self.assertIsNotNone(created_object, "Could not create an object.") delta_dict = {} delta_dict.update(delta.copy()) updated = self.db_api.metadef_object_update( self.context, created_ns['namespace'], created_object['id'], delta_dict) self.assertEqual(delta['name'], updated['name']) self.assertEqual(delta['json_schema'], updated['json_schema']) def test_object_delete(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) object_fixture = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], object_fixture) self.assertIsNotNone(created_object, "Could not create an object.") self.db_api.metadef_object_delete( self.context, created_ns['namespace'], created_object['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_object_get, self.context, created_ns['namespace'], created_object['name']) class MetadefResourceTypeTests(object): def test_resource_type_get_all(self): resource_types_orig = self.db_api.metadef_resource_type_get_all( self.context) fixture = build_resource_type_fixture() self.db_api.metadef_resource_type_create(self.context, fixture) resource_types = self.db_api.metadef_resource_type_get_all( self.context) test_len = len(resource_types_orig) + 1 self.assertEqual(test_len, len(resource_types)) class MetadefResourceTypeAssociationTests(object): def test_association_create(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created) self._assert_saved_fields(ns_fixture, ns_created) assn_fixture = build_association_fixture() assn_created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], assn_fixture) self.assertIsNotNone(assn_created) self._assert_saved_fields(assn_fixture, assn_created) def test_association_create_duplicate(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created) self._assert_saved_fields(ns_fixture, ns_created) assn_fixture = build_association_fixture() assn_created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], assn_fixture) self.assertIsNotNone(assn_created) self._assert_saved_fields(assn_fixture, assn_created) self.assertRaises(exception.Duplicate, self.db_api. 
metadef_resource_type_association_create, self.context, ns_created['namespace'], assn_fixture) def test_association_delete(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture = build_association_fixture() created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], fixture) self.assertIsNotNone(created, "Could not create an association.") created_resource = self.db_api.metadef_resource_type_get( self.context, fixture['name']) self.assertIsNotNone(created_resource, "resource_type not created") self.db_api.metadef_resource_type_association_delete( self.context, ns_created['namespace'], created_resource['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_resource_type_association_get, self.context, ns_created['namespace'], created_resource['name']) def test_association_get_all_by_namespace(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture = build_association_fixture() created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], fixture) self.assertIsNotNone(created, "Could not create an association.") found = ( self.db_api.metadef_resource_type_association_get_all_by_namespace( self.context, ns_created['namespace'])) self.assertEqual(1, len(found)) for item in found: self._assert_saved_fields(fixture, item) class MetadefTagTests(object): def test_tag_create(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], fixture_tag) self._assert_saved_fields(fixture_tag, created_tag) def test_tag_create_duplicate(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], fixture_tag) self._assert_saved_fields(fixture_tag, created_tag) self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create, self.context, created_ns['namespace'], fixture_tag) def test_tag_create_tags(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) created_tags = self.db_api.metadef_tag_create_tags( self.context, created_ns['namespace'], tags) actual = set([tag['name'] for tag in created_tags]) expected = set(['Tag1', 'Tag2', 'Tag3']) self.assertEqual(expected, actual) def test_tag_create_tags_with_append(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) created_tags = 
self.db_api.metadef_tag_create_tags( self.context, created_ns['namespace'], tags) actual = set([tag['name'] for tag in created_tags]) expected = set(['Tag1', 'Tag2', 'Tag3']) self.assertEqual(expected, actual) new_tags = build_tags_fixture(['Tag4', 'Tag5', 'Tag6']) new_created_tags = self.db_api.metadef_tag_create_tags( self.context, created_ns['namespace'], new_tags, can_append=True) actual = set([tag['name'] for tag in new_created_tags]) expected = set(['Tag4', 'Tag5', 'Tag6']) self.assertEqual(expected, actual) tags = self.db_api.metadef_tag_get_all(self.context, created_ns['namespace'], sort_key='created_at') actual = set([tag['name'] for tag in tags]) expected = set(['Tag1', 'Tag2', 'Tag3', 'Tag4', 'Tag5', 'Tag6']) self.assertEqual(expected, actual) def test_tag_create_duplicate_tags_1(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3', 'Tag2']) self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create_tags, self.context, created_ns['namespace'], tags) def test_tag_create_duplicate_tags_2(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) self.db_api.metadef_tag_create_tags(self.context, created_ns['namespace'], tags) dup_tag = build_tag_fixture(namespace_id=created_ns['id'], name='Tag3') self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create, self.context, created_ns['namespace'], dup_tag) def test_tag_create_duplicate_tags_3(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) self.db_api.metadef_tag_create_tags(self.context, created_ns['namespace'], tags) dup_tags = build_tags_fixture(['Tag3', 'Tag4', 'Tag5']) self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create_tags, self.context, created_ns['namespace'], dup_tags, can_append=True) def test_tag_get(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture_ns, created_ns) fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], fixture_tag) found_tag = self.db_api.metadef_tag_get( self.context, created_ns['namespace'], created_tag['name']) self._assert_saved_fields(fixture_tag, found_tag) def test_tag_get_all(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create(self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture1 = build_tag_fixture(namespace_id=ns_created['id']) created_tag1 = self.db_api.metadef_tag_create( self.context, ns_created['namespace'], fixture1) self.assertIsNotNone(created_tag1, "Could not create tag 1.") fixture2 = build_tag_fixture(namespace_id=ns_created['id'], name='test-tag-2') created_tag2 = self.db_api.metadef_tag_create( self.context, ns_created['namespace'], fixture2) self.assertIsNotNone(created_tag2, "Could not create tag 2.") found 
= self.db_api.metadef_tag_get_all( self.context, ns_created['namespace'], sort_key='created_at') self.assertEqual(2, len(found)) def test_tag_update(self): delta = {'name': 'New-name'} fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) tag_fixture = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], tag_fixture) self.assertIsNotNone(created_tag, "Could not create a tag.") delta_dict = {} delta_dict.update(delta.copy()) updated = self.db_api.metadef_tag_update( self.context, created_ns['namespace'], created_tag['id'], delta_dict) self.assertEqual(delta['name'], updated['name']) def test_tag_delete(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) tag_fixture = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], tag_fixture) self.assertIsNotNone(created_tag, "Could not create a tag.") self.db_api.metadef_tag_delete( self.context, created_ns['namespace'], created_tag['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_tag_get, self.context, created_ns['namespace'], created_tag['name']) class MetadefLoadUnloadTests: # if additional default schemas are added, you need to update this _namespace_count = 33 _namespace_object_counts = { 'OS::Compute::Quota': 3, 'OS::Software::WebServers': 3, 'OS::Software::DBMS': 12, 'OS::Software::Runtimes': 5, } _namespace_property_counts = { 'CIM::ProcessorAllocationSettingData': 3, 'CIM::ResourceAllocationSettingData': 19, 'CIM::StorageAllocationSettingData': 13, 'CIM::VirtualSystemSettingData': 17, 'OS::Compute::XenAPI': 1, 'OS::Compute::InstanceData': 2, 'OS::Compute::Libvirt': 4, 'OS::Compute::VMwareQuotaFlavor': 2, 'OS::Cinder::Volumetype': 1, 'OS::Glance::Signatures': 4, 'OS::Compute::AggregateIoOpsFilter': 1, 'OS::Compute::RandomNumberGenerator': 3, 'OS::Compute::VTPM': 2, 'OS::Compute::Hypervisor': 2, 'OS::Compute::CPUPinning': 2, 'OS::OperatingSystem': 3, 'OS::Compute::AggregateDiskFilter': 1, 'OS::Compute::AggregateNumInstancesFilter': 1, 'OS::Compute::CPUMode': 1, 'OS::Compute::HostCapabilities': 7, 'OS::Compute::VirtCPUTopology': 6, 'OS::Glance::CommonImageProperties': 10, 'OS::Compute::GuestShutdownBehavior': 1, 'OS::Compute::VMwareFlavor': 2, 'OS::Compute::TPM': 1, 'OS::Compute::GuestMemoryBacking': 1, 'OS::Compute::LibvirtImage': 17, 'OS::Compute::VMware': 6, 'OS::Compute::Watchdog': 1, } def test_metadef_load_unload(self): # load the metadata definitions metadata.db_load_metadefs(self.db_api.get_engine()) # trust but verify expected = self._namespace_count namespaces = self.db_api.metadef_namespace_get_all(self.adm_context) actual = len(namespaces) self.assertEqual( expected, actual, f"expected {expected} namespaces but got {actual}" ) for namespace in namespaces: expected = self._namespace_object_counts.get( namespace['namespace'], 0, ) objects = self.db_api.metadef_object_get_all( self.adm_context, namespace['namespace'], ) actual = len(objects) self.assertEqual( expected, actual, f"expected {expected} objects in {namespace['namespace']} " f"namespace but got {actual}: " f"{', '.join(o['name'] for o in objects)}" ) for namespace in namespaces: expected = self._namespace_property_counts.get( namespace['namespace'], 0, ) properties = 
self.db_api.metadef_property_get_all( self.adm_context, namespace['namespace'], ) actual = len(properties) self.assertEqual( expected, actual, f"expected {expected} properties in {namespace['namespace']} " f"namespace but got {actual}: " f"{', '.join(p['name'] for p in properties)}" ) # unload the definitions metadata.db_unload_metadefs(self.db_api.get_engine()) class MetadefDriverTests(MetadefNamespaceTests, MetadefResourceTypeTests, MetadefResourceTypeAssociationTests, MetadefPropertyTests, MetadefObjectTests, MetadefTagTests, MetadefLoadUnloadTests): # collection class pass
glance-29.0.0/glance/tests/functional/db/migrations/__init__.py
glance-29.0.0/glance/tests/functional/db/migrations/test_2024_1_expand01.py
# Copyright (c) 2023 RedHat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
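# The migration tests below all follow the hook-by-name protocol of
# AlembicMigrationsMixin: while the revisions are walked, an optional
# _pre_upgrade_<revision>(engine) hook runs before a revision is
# applied and _check_<revision>(engine, data) runs after it, with the
# pre-upgrade return value passed through (roughly) as 'data'. A
# minimal sketch of a new test under that convention ('example01' is a
# hypothetical revision name, not a real one):
#
# class TestExample01Mixin(test_migrations.AlembicMigrationsMixin):
#     def _pre_upgrade_example01(self, engine):
#         # seed rows against the old schema; the returned value
#         # becomes the 'data' argument of the check hook
#         return {'seeded': True}
#
#     def _check_example01(self, engine, data):
#         # assert the new schema and the migrated data here
#         self.assertTrue(data['seeded'])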
from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils import sqlalchemy from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class Test2024_1Expand01Mixin(test_migrations.AlembicMigrationsMixin): def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='2024_1_expand01') def _pre_upgrade_2024_1_expand01(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'node_reference') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'cached_images') def _check_2024_1_expand01(self, engine, data): # check that after migration, 'node_reference' and 'cached_images' # tables are created with expected columns and indexes node_reference = db_utils.get_table(engine, 'node_reference') self.assertIn('node_reference_id', node_reference.c) self.assertIn('node_reference_url', node_reference.c) self.assertTrue(db_utils.index_exists( engine, 'node_reference', 'uq_node_reference_node_reference_url'), 'Index %s on table %s does not exist' % ('uq_node_reference_node_reference_url', 'node_reference')) cached_images = db_utils.get_table(engine, 'cached_images') self.assertIn('id', cached_images.c) self.assertIn('image_id', cached_images.c) self.assertIn('last_accessed', cached_images.c) self.assertIn('last_modified', cached_images.c) self.assertIn('size', cached_images.c) self.assertIn('hits', cached_images.c) self.assertIn('checksum', cached_images.c) self.assertIn('node_reference_id', cached_images.c) self.assertTrue(db_utils.index_exists( engine, 'cached_images', 'ix_cached_images_image_id_node_reference_id'), 'Index %s on table %s does not exist' % ('ix_cached_images_image_id_node_reference_id', 'cached_images')) class Test2024_1Expand01MySQL( Test2024_1Expand01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture
glance-29.0.0/glance/tests/functional/db/migrations/test_mitaka01.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
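# mitaka01 adds 'created_at_image_idx' and 'updated_at_image_idx' to
# the images table; presence is verified through SQLAlchemy's runtime
# inspection API, which behaves the same on MySQL, PostgreSQL and
# SQLite. A self-contained sketch of that API (illustrative only, using
# an in-memory SQLite engine with names chosen to mirror the migration):
import sqlalchemy as sa

_engine = sa.create_engine('sqlite://')
with _engine.begin() as conn:
    conn.execute(sa.text('CREATE TABLE images (id TEXT, created_at DATETIME)'))
    conn.execute(sa.text('CREATE INDEX created_at_image_idx ON images (created_at)'))
# inspect() returns a dialect-agnostic Inspector bound to the engine
assert 'created_at_image_idx' in [
    ix['name'] for ix in sa.inspect(_engine).get_indexes('images')]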
from oslo_db.sqlalchemy import test_fixtures import sqlalchemy from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils def get_indexes(table, engine): inspector = sqlalchemy.inspect(engine) return [idx['name'] for idx in inspector.get_indexes(table)] class TestMitaka01Mixin(test_migrations.AlembicMigrationsMixin): def _pre_upgrade_mitaka01(self, engine): indexes = get_indexes('images', engine) self.assertNotIn('created_at_image_idx', indexes) self.assertNotIn('updated_at_image_idx', indexes) def _check_mitaka01(self, engine, data): indexes = get_indexes('images', engine) self.assertIn('created_at_image_idx', indexes) self.assertIn('updated_at_image_idx', indexes) class TestMitaka01MySQL( TestMitaka01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestMitaka01PostgresSQL( TestMitaka01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture class TestMitaka01Sqlite( TestMitaka01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): pass
glance-29.0.0/glance/tests/functional/db/migrations/test_mitaka02.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
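# These data-migration tests seed rows with the SQLAlchemy 2.0 idiom
# used throughout this directory: connections no longer autocommit, so
# every write is wrapped in an explicit transaction that commits on
# success and rolls back on error. A self-contained sketch of the
# pattern (illustrative only: in-memory SQLite with a throwaway table):
import sqlalchemy as sa

_engine = sa.create_engine('sqlite://')
_meta = sa.MetaData()
_t = sa.Table('demo', _meta, sa.Column('name', sa.String))
_meta.create_all(_engine)
# conn.begin() opens the transaction; leaving the block commits it
with _engine.connect() as conn, conn.begin():
    conn.execute(_t.insert().values([{'name': 'OS::Nova::Instance'},
                                     {'name': 'OS::Nova::Blah'}]))
with _engine.connect() as conn:
    assert len(conn.execute(_t.select()).fetchall()) == 2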
import datetime from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class TestMitaka02Mixin(test_migrations.AlembicMigrationsMixin): def _pre_upgrade_mitaka02(self, engine): metadef_resource_types = db_utils.get_table( engine, 'metadef_resource_types') now = datetime.datetime.now() db_rec1 = dict(id='9580', name='OS::Nova::Instance', protected=False, created_at=now, updated_at=now,) db_rec2 = dict(id='9581', name='OS::Nova::Blah', protected=False, created_at=now, updated_at=now,) db_values = (db_rec1, db_rec2) with engine.connect() as conn, conn.begin(): conn.execute(metadef_resource_types.insert().values(db_values)) def _check_mitaka02(self, engine, data): metadef_resource_types = db_utils.get_table( engine, 'metadef_resource_types') with engine.connect() as conn: result = conn.execute( metadef_resource_types.select() .where(metadef_resource_types.c.name == 'OS::Nova::Instance') ).fetchall() self.assertEqual(0, len(result)) result = conn.execute( metadef_resource_types.select() .where(metadef_resource_types.c.name == 'OS::Nova::Server') ).fetchall() self.assertEqual(1, len(result)) class TestMitaka02MySQL( TestMitaka02Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestMitaka02PostgresSQL( TestMitaka02Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture class TestMitaka02Sqlite( TestMitaka02Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): pass
glance-29.0.0/glance/tests/functional/db/migrations/test_ocata_contract01.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
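# Ocata splits the is_public -> visibility change into three phases so
# deployments can upgrade without downtime: expand adds the nullable
# 'visibility' column (plus sync triggers), a data migration backfills
# it, and contract finally drops 'is_public'. This file exercises the
# contract phase; the core assertions are reflection-based column
# checks, sketched here against a stand-in SQLite table (illustrative
# only, not the real migrated schema):
import sqlalchemy as sa

_engine = sa.create_engine('sqlite://')
with _engine.begin() as conn:
    conn.execute(sa.text('CREATE TABLE images (id TEXT, visibility TEXT)'))
# autoload_with reflects the live schema, like db_utils.get_table does
_images = sa.Table('images', sa.MetaData(), autoload_with=_engine)
assert 'visibility' in _images.c      # survives the contract phase
assert 'is_public' not in _images.c   # dropped by the contract phase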
import datetime from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils from glance.db.sqlalchemy.alembic_migrations import data_migrations from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class TestOcataContract01Mixin(test_migrations.AlembicMigrationsMixin): def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='ocata_contract01') def _pre_upgrade_ocata_contract01(self, engine): images = db_utils.get_table(engine, 'images') now = datetime.datetime.now() self.assertIn('is_public', images.c) self.assertIn('visibility', images.c) self.assertTrue(images.c.is_public.nullable) self.assertTrue(images.c.visibility.nullable) # inserting a public image record public_temp = dict(deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='public_id_before_expand') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(public_temp)) # inserting a private image record shared_temp = dict(deleted=False, created_at=now, status='active', is_public=False, min_disk=0, min_ram=0, id='private_id_before_expand') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(shared_temp)) data_migrations.migrate(engine=engine, release='ocata') def _check_ocata_contract01(self, engine, data): # check that after contract 'is_public' column is dropped images = db_utils.get_table(engine, 'images') self.assertNotIn('is_public', images.c) self.assertIn('visibility', images.c) class TestOcataContract01MySQL( TestOcataContract01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture
glance-29.0.0/glance/tests/functional/db/migrations/test_ocata_expand01.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
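# While both columns coexist, the expand migration installs database
# triggers so writers on either side of the upgrade stay consistent.
# The mapping the triggers implement, as exercised by the trigger tests
# below, is roughly the following sketch (membership is unknown at
# INSERT time, hence non-public rows written via the old column land as
# 'shared' rather than 'private'):
def _old_to_new(is_public):
    # trigger fired when only is_public is supplied
    return 'public' if is_public else 'shared'

def _new_to_old(visibility):
    # trigger fired when only visibility is supplied
    return visibility == 'public'

assert _old_to_new(False) == 'shared'
assert _new_to_old('private') is False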
import datetime from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class TestOcataExpand01Mixin(test_migrations.AlembicMigrationsMixin): def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='ocata_expand01') def _pre_upgrade_ocata_expand01(self, engine): images = db_utils.get_table(engine, 'images') now = datetime.datetime.now() self.assertIn('is_public', images.c) self.assertNotIn('visibility', images.c) self.assertFalse(images.c.is_public.nullable) # inserting a public image record public_temp = dict(deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='public_id_before_expand') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(public_temp)) # inserting a private image record shared_temp = dict(deleted=False, created_at=now, status='active', is_public=False, min_disk=0, min_ram=0, id='private_id_before_expand') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(shared_temp)) def _check_ocata_expand01(self, engine, data): # check that after migration, 'visibility' column is introduced images = db_utils.get_table(engine, 'images') self.assertIn('visibility', images.c) self.assertIn('is_public', images.c) self.assertTrue(images.c.is_public.nullable) self.assertTrue(images.c.visibility.nullable) # tests visibility set to None for existing images with engine.connect() as conn: rows = conn.execute( images.select().where( images.c.id.like('%_before_expand') ).order_by(images.c.id) ).fetchall() self.assertEqual(2, len(rows)) # private image first self.assertEqual(0, rows[0].is_public) self.assertEqual('private_id_before_expand', rows[0].id) self.assertIsNone(rows[0].visibility) # then public image self.assertEqual(1, rows[1].is_public) self.assertEqual('public_id_before_expand', rows[1].id) self.assertIsNone(rows[1].visibility) self._test_trigger_old_to_new(engine, images) self._test_trigger_new_to_old(engine, images) def _test_trigger_new_to_old(self, engine, images): now = datetime.datetime.now() # inserting a public image record after expand public_temp = dict(deleted=False, created_at=now, status='active', visibility='public', min_disk=0, min_ram=0, id='public_id_new_to_old') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(public_temp)) # inserting a private image record after expand shared_temp = dict(deleted=False, created_at=now, status='active', visibility='private', min_disk=0, min_ram=0, id='private_id_new_to_old') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(shared_temp)) # inserting a shared image record after expand shared_temp = dict(deleted=False, created_at=now, status='active', visibility='shared', min_disk=0, min_ram=0, id='shared_id_new_to_old') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(shared_temp)) # test visibility is set appropriately by the trigger for new images with engine.connect() as conn: rows = conn.execute( images.select().where( images.c.id.like('%_new_to_old') ).order_by(images.c.id) ).fetchall() self.assertEqual(3, len(rows)) # private image first self.assertEqual(0, rows[0].is_public) self.assertEqual('private_id_new_to_old', rows[0].id) self.assertEqual('private', rows[0].visibility) # then public image self.assertEqual(1, rows[1].is_public) 
self.assertEqual('public_id_new_to_old', rows[1].id) self.assertEqual('public', rows[1].visibility) # then shared image self.assertEqual(0, rows[2].is_public) self.assertEqual('shared_id_new_to_old', rows[2].id) self.assertEqual('shared', rows[2].visibility) def _test_trigger_old_to_new(self, engine, images): now = datetime.datetime.now() # inserting a public image record after expand public_temp = dict(deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='public_id_old_to_new') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(public_temp)) # inserting a private image record after expand shared_temp = dict(deleted=False, created_at=now, status='active', is_public=False, min_disk=0, min_ram=0, id='private_id_old_to_new') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(shared_temp)) # tests visibility is set appropriately by the trigger for new images with engine.connect() as conn: rows = conn.execute( images.select().where( images.c.id.like('%_old_to_new') ).order_by(images.c.id) ).fetchall() self.assertEqual(2, len(rows)) # private image first self.assertEqual(0, rows[0].is_public) self.assertEqual('private_id_old_to_new', rows[0].id) self.assertEqual('shared', rows[0].visibility) # then public image self.assertEqual(1, rows[1].is_public) self.assertEqual('public_id_old_to_new', rows[1].id) self.assertEqual('public', rows[1].visibility) class TestOcataExpand01MySQL( TestOcataExpand01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture
glance-29.0.0/glance/tests/functional/db/migrations/test_ocata_migrate01.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
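# Unlike the INSERT triggers, the Ocata data migration runs after the
# fact and can consult image_members, so it backfills 'visibility' for
# pre-existing rows with the full mapping. The intended rule, sketched
# below (the exact visibility assertions in this file are currently
# commented out pending bug #1745003):
def _backfill_visibility(is_public, has_active_members):
    # has_active_members: at least one non-deleted image_members row
    if is_public:
        return 'public'
    return 'shared' if has_active_members else 'private'

assert _backfill_visibility(True, True) == 'public'
assert _backfill_visibility(False, True) == 'shared'
assert _backfill_visibility(False, False) == 'private'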
import datetime from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils from glance.db.sqlalchemy.alembic_migrations import data_migrations from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class TestOcataMigrate01Mixin(test_migrations.AlembicMigrationsMixin): def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='ocata_expand01') def _pre_upgrade_ocata_expand01(self, engine): images = db_utils.get_table(engine, 'images') image_members = db_utils.get_table(engine, 'image_members') now = datetime.datetime.now() # inserting a public image record public_temp = dict(deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='public_id') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(public_temp)) # inserting a non-public image record for 'shared' visibility test shared_temp = dict(deleted=False, created_at=now, status='active', is_public=False, min_disk=0, min_ram=0, id='shared_id') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(shared_temp)) # inserting a non-public image records for 'private' visibility test private_temp = dict(deleted=False, created_at=now, status='active', is_public=False, min_disk=0, min_ram=0, id='private_id_1') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(private_temp)) private_temp = dict(deleted=False, created_at=now, status='active', is_public=False, min_disk=0, min_ram=0, id='private_id_2') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(private_temp)) # adding an active as well as a deleted image member for checking # 'shared' visibility temp = dict(deleted=False, created_at=now, image_id='shared_id', member='fake_member_452', can_share=True, id=45) with engine.connect() as conn, conn.begin(): conn.execute(image_members.insert().values(temp)) temp = dict(deleted=True, created_at=now, image_id='shared_id', member='fake_member_453', can_share=True, id=453) with engine.connect() as conn, conn.begin(): conn.execute(image_members.insert().values(temp)) # adding an image member, but marking it deleted, # for testing 'private' visibility temp = dict(deleted=True, created_at=now, image_id='private_id_2', member='fake_member_451', can_share=True, id=451) with engine.connect() as conn, conn.begin(): conn.execute(image_members.insert().values(temp)) # adding an active image member for the 'public' image, # to test it remains public regardless. 
        temp = dict(deleted=False, created_at=now, image_id='public_id',
                    member='fake_member_450', can_share=True, id=450)
        with engine.connect() as conn, conn.begin():
            conn.execute(image_members.insert().values(temp))

    def _check_ocata_expand01(self, engine, data):
        images = db_utils.get_table(engine, 'images')

        # check that visibility is null for existing images
        with engine.connect() as conn:
            rows = conn.execute(
                images.select().order_by(images.c.id)
            ).fetchall()
        self.assertEqual(4, len(rows))
        for row in rows:
            self.assertIsNone(row.visibility)

        # run data migrations
        data_migrations.migrate(engine)

        # check that visibility is set appropriately for all images
        with engine.connect() as conn:
            rows = conn.execute(
                images.select().order_by(images.c.id)
            ).fetchall()
        self.assertEqual(4, len(rows))

        # private_id_1 has private visibility
        self.assertEqual('private_id_1', rows[0].id)
        # TODO(rosmaita): bug #1745003
        # self.assertEqual('private', rows[0].visibility)

        # private_id_2 has private visibility
        self.assertEqual('private_id_2', rows[1].id)
        # TODO(rosmaita): bug #1745003
        # self.assertEqual('private', rows[1].visibility)

        # public_id has public visibility
        self.assertEqual('public_id', rows[2].id)
        # TODO(rosmaita): bug #1745003
        # self.assertEqual('public', rows[2].visibility)

        # shared_id has shared visibility
        self.assertEqual('shared_id', rows[3].id)
        # TODO(rosmaita): bug #1745003
        # self.assertEqual('shared', rows[3].visibility)


class TestOcataMigrate01MySQL(
    TestOcataMigrate01Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


class TestOcataMigrate01_EmptyDBMixin(test_migrations.AlembicMigrationsMixin):
    """This mixin is used to create an initial glance database and upgrade it
    up to the ocata_expand01 revision.
    """

    def _get_revisions(self, config):
        return test_migrations.AlembicMigrationsMixin._get_revisions(
            self, config, head='ocata_expand01')

    def _pre_upgrade_ocata_expand01(self, engine):
        # New/empty database
        pass

    def _check_ocata_expand01(self, engine, data):
        images = db_utils.get_table(engine, 'images')

        # check that there are no rows in the images table
        with engine.connect() as conn:
            rows = conn.execute(
                images.select().order_by(images.c.id)
            ).fetchall()
        self.assertEqual(0, len(rows))

        # run data migrations
        data_migrations.migrate(engine)


class TestOcataMigrate01_EmptyDBMySQL(
    TestOcataMigrate01_EmptyDBMixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    """This test runs the Ocata data migrations on an empty database."""
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


---- glance-29.0.0/glance/tests/functional/db/migrations/test_pike_contract01.py ----

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
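# For illustration only (not part of the original file): the contract tests
# below rely on table reflection, where oslo.db's get_table() raises
# sqlalchemy.exc.NoSuchTableError once the contract phase has dropped a
# table. Plain SQLAlchemy against an in-memory SQLite engine shows the same
# behavior:
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
artifacts = sa.Table('artifacts', meta, sa.Column('id', sa.String(36)))
meta.create_all(engine)

# reflection succeeds while the table exists...
assert sa.Table('artifacts', sa.MetaData(), autoload_with=engine) is not None

# ...and raises NoSuchTableError once the table has been dropped
artifacts.drop(engine)
try:
    sa.Table('artifacts', sa.MetaData(), autoload_with=engine)
    raise AssertionError('expected NoSuchTableError')
except sa.exc.NoSuchTableError:
    pass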
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils
import sqlalchemy

from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils


class TestPikeContract01Mixin(test_migrations.AlembicMigrationsMixin):
    artifacts_table_names = [
        'artifact_blob_locations',
        'artifact_properties',
        'artifact_blobs',
        'artifact_dependencies',
        'artifact_tags',
        'artifacts'
    ]

    def _get_revisions(self, config):
        return test_migrations.AlembicMigrationsMixin._get_revisions(
            self, config, head='pike_contract01')

    def _pre_upgrade_pike_contract01(self, engine):
        # verify presence of the artifacts tables
        for table_name in self.artifacts_table_names:
            table = db_utils.get_table(engine, table_name)
            self.assertIsNotNone(table)

    def _check_pike_contract01(self, engine, data):
        # verify absence of the artifacts tables
        for table_name in self.artifacts_table_names:
            self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                              db_utils.get_table, engine, table_name)


class TestPikeContract01MySQL(
    TestPikeContract01Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


---- glance-29.0.0/glance/tests/functional/db/migrations/test_pike_expand01.py ----

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils

from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils


class TestPikeExpand01Mixin(test_migrations.AlembicMigrationsMixin):
    artifacts_table_names = [
        'artifact_blob_locations',
        'artifact_properties',
        'artifact_blobs',
        'artifact_dependencies',
        'artifact_tags',
        'artifacts'
    ]

    def _get_revisions(self, config):
        return test_migrations.AlembicMigrationsMixin._get_revisions(
            self, config, head='pike_expand01')

    def _pre_upgrade_pike_expand01(self, engine):
        # verify presence of the artifacts tables
        for table_name in self.artifacts_table_names:
            table = db_utils.get_table(engine, table_name)
            self.assertIsNotNone(table)

    def _check_pike_expand01(self, engine, data):
        # should be no changes, so re-run pre-upgrade check
        self._pre_upgrade_pike_expand01(engine)


class TestPikeExpand01MySQL(
    TestPikeExpand01Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


---- glance-29.0.0/glance/tests/functional/db/migrations/test_pike_migrate01.py ----

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_db.sqlalchemy import test_fixtures

import glance.tests.functional.db.migrations.test_pike_expand01 as tpe01
import glance.tests.utils as test_utils


# no TestPikeMigrate01Mixin class needed, can use TestPikeExpand01Mixin
# instead


class TestPikeMigrate01MySQL(
    tpe01.TestPikeExpand01Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


---- glance-29.0.0/glance/tests/functional/db/migrations/test_rocky_expand01.py ----

# Copyright (c) 2018 RedHat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils

from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils


class TestRockyExpand01Mixin(test_migrations.AlembicMigrationsMixin):

    def _get_revisions(self, config):
        return test_migrations.AlembicMigrationsMixin._get_revisions(
            self, config, head='rocky_expand01')

    def _pre_upgrade_rocky_expand01(self, engine):
        images = db_utils.get_table(engine, 'images')
        self.assertNotIn('os_hidden', images.c)

    def _check_rocky_expand01(self, engine, data):
        # check that after migration, 'os_hidden' column is introduced
        images = db_utils.get_table(engine, 'images')
        self.assertIn('os_hidden', images.c)
        self.assertFalse(images.c.os_hidden.nullable)


class TestRockyExpand01MySQL(
    TestRockyExpand01Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


---- glance-29.0.0/glance/tests/functional/db/migrations/test_rocky_expand02.py ----

# Copyright (c) 2018 Verizon Wireless
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
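# For context: the two columns exercised below back Glance's "multihash"
# feature, where os_hash_value is the hexdigest of the image data under the
# algorithm named by os_hash_algo (sha512 by default, as also seen in
# ft_utils.verify_image_hashes_and_status later in this package). A minimal
# illustrative sketch, not part of the original file:
import hashlib

os_hash_algo = 'sha512'
hasher = hashlib.new(os_hash_algo)
for chunk in (b'IMAGE', b'DATA'):   # image data would be read in chunks
    hasher.update(chunk)
os_hash_value = hasher.hexdigest()
assert len(os_hash_value) == 128    # sha512 yields a 128-char hexdigest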
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils

from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils


class TestRockyExpand02Mixin(test_migrations.AlembicMigrationsMixin):

    def _get_revisions(self, config):
        return test_migrations.AlembicMigrationsMixin._get_revisions(
            self, config, head='rocky_expand02')

    def _pre_upgrade_rocky_expand02(self, engine):
        images = db_utils.get_table(engine, 'images')
        self.assertNotIn('os_hash_algo', images.c)
        self.assertNotIn('os_hash_value', images.c)

    def _check_rocky_expand02(self, engine, data):
        images = db_utils.get_table(engine, 'images')
        self.assertIn('os_hash_algo', images.c)
        self.assertTrue(images.c.os_hash_algo.nullable)
        self.assertIn('os_hash_value', images.c)
        self.assertTrue(images.c.os_hash_value.nullable)


class TestRockyExpand02MySQL(
    TestRockyExpand02Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


---- glance-29.0.0/glance/tests/functional/db/migrations/test_train_migrate01.py ----

# Copyright 2019 RedHat Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
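# The Train data migration tested below rewrites the JSON blob stored in
# image_locations.meta_data, replacing the legacy 'backend' key with
# 'store'. A minimal pure-Python sketch of that transformation (illustrative
# only; the real migration operates on the database rows via
# data_migrations.migrate(engine, release='train')):
import json

meta_data = '{"backend": "fast"}'
loc = json.loads(meta_data)
if 'backend' in loc:
    # preserve the value, rename the key
    loc['store'] = loc.pop('backend')
assert json.dumps(loc) == '{"store": "fast"}'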
import datetime from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils from glance.db.sqlalchemy.alembic_migrations import data_migrations from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class TestTrainMigrate01Mixin(test_migrations.AlembicMigrationsMixin): def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='train_expand01') def _pre_upgrade_train_expand01(self, engine): images = db_utils.get_table(engine, 'images') image_locations = db_utils.get_table(engine, 'image_locations') now = datetime.datetime.now() # inserting a public image record image_1 = dict(deleted=False, created_at=now, status='active', min_disk=0, min_ram=0, visibility='public', id='image_1') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(image_1)) image_2 = dict(deleted=False, created_at=now, status='active', min_disk=0, min_ram=0, visibility='public', id='image_2') with engine.connect() as conn, conn.begin(): conn.execute(images.insert().values(image_2)) # adding records to image_locations tables temp = dict(deleted=False, created_at=now, image_id='image_1', value='image_location_1', meta_data='{"backend": "fast"}', id=1) with engine.connect() as conn, conn.begin(): conn.execute(image_locations.insert().values(temp)) temp = dict(deleted=False, created_at=now, image_id='image_2', value='image_location_2', meta_data='{"backend": "cheap"}', id=2) with engine.connect() as conn, conn.begin(): conn.execute(image_locations.insert().values(temp)) def _check_train_expand01(self, engine, data): image_locations = db_utils.get_table(engine, 'image_locations') # check that meta_data has 'backend' key for existing image_locations with engine.connect() as conn: rows = conn.execute( image_locations.select().order_by(image_locations.c.id) ).fetchall() self.assertEqual(2, len(rows)) for row in rows: self.assertIn('"backend":', row.meta_data) # run data migrations data_migrations.migrate(engine, release='train') # check that meta_data has 'backend' key replaced with 'store' with engine.connect() as conn: rows = conn.execute( image_locations.select().order_by(image_locations.c.id) ).fetchall() self.assertEqual(2, len(rows)) for row in rows: self.assertNotIn('"backend":', row.meta_data) self.assertIn('"store":', row.meta_data) class TestTrainMigrate01MySQL( TestTrainMigrate01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestTrain01PostgresSQL( TestTrainMigrate01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture class TestTrainMigrate01_EmptyDBMixin(test_migrations.AlembicMigrationsMixin): """This mixin is used to create an initial glance database and upgrade it up to the train_expand01 revision. 
""" def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='train_expand01') def _pre_upgrade_train_expand01(self, engine): # New/empty database pass def _check_train_expand01(self, engine, data): images = db_utils.get_table(engine, 'images') # check that there are no rows in the images table with engine.connect() as conn: rows = conn.execute( images.select().order_by(images.c.id) ).fetchall() self.assertEqual(0, len(rows)) # run data migrations data_migrations.migrate(engine) class TestTrainMigrate01_EmptyDBMySQL( TestTrainMigrate01_EmptyDBMixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestTrainMigrate01_PySQL( TestTrainMigrate01_EmptyDBMixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/db/migrations/test_wallaby_expand01.py0000664000175000017500000000445200000000000027125 0ustar00zuulzuul00000000000000# Copyright (c) 2021 RedHat, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import utils as db_utils from glance.tests.functional.db import test_migrations import glance.tests.utils as test_utils class TestWallabyExpand01Mixin(test_migrations.AlembicMigrationsMixin): def _get_revisions(self, config): return test_migrations.AlembicMigrationsMixin._get_revisions( self, config, head='wallaby_expand01') def _pre_upgrade_wallaby_expand01(self, engine): tasks = db_utils.get_table(engine, 'tasks') self.assertNotIn('image_id', tasks.c) self.assertNotIn('request_id', tasks.c) self.assertNotIn('user_id', tasks.c) self.assertFalse(db_utils.index_exists(engine, 'tasks', 'ix_tasks_image_id')) def _check_wallaby_expand01(self, engine, data): # check that after migration, 'image_id', 'request_id', 'user' # columns are added tasks = db_utils.get_table(engine, 'tasks') self.assertIn('image_id', tasks.c) self.assertIn('request_id', tasks.c) self.assertIn('user_id', tasks.c) self.assertTrue(tasks.c.image_id.nullable) self.assertTrue(tasks.c.request_id.nullable) self.assertTrue(tasks.c.user_id.nullable) self.assertTrue(db_utils.index_exists(engine, 'tasks', 'ix_tasks_image_id'), 'Index %s on table %s does not exist' % ('ix_tasks_image_id', 'tasks')) class TestWallabyExpand01MySQL( TestWallabyExpand01Mixin, test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/db/test_migrations.py0000664000175000017500000002625400000000000023776 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace # Copyright 2016 Intel Corporation # # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import os

from alembic import command as alembic_command
from alembic import script as alembic_script
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from sqlalchemy import sql
import sqlalchemy.types as types

from glance.db.sqlalchemy import alembic_migrations
from glance.db.sqlalchemy.alembic_migrations import versions
from glance.db.sqlalchemy import models
from glance.db.sqlalchemy import models_metadef
import glance.tests.utils as test_utils


class TestVersions(test_utils.BaseTestCase):
    def test_phase_and_naming(self):
        """Test that migrations follow the conventional rules.

        Each release should have at least one file for each of the required
        phases, if it has one for any of them. They should also be named in
        a consistent way going forward.
        """
        # These are the phases that we require. Each release should have a
        # version for each of these phases, even if some are empty.
        required_phases = set(['expand', 'migrate', 'contract'])
        # The initial migration is special, and mitaka was not done according
        # to convention. Both of those are exceptions to these rules which
        # need not be enforced.
        # NOTE(danms): Do not add anything else to this list! New migrations
        # should follow the rules!
        exception_releases = ['liberty', 'mitaka']
        versions_path, _ = os.path.split(versions.__file__)
        version_files = os.listdir(versions_path)
        version_files += os.listdir(os.path.join(versions_path, '..',
                                                 'data_migrations'))
        releases = collections.defaultdict(set)
        for version_file in [v for v in version_files if v[0] != '_']:
            # Exception releases get ignored
            if any([version_file.startswith(prefix)
                    for prefix in exception_releases]):
                continue

            # Legacy database scripts (i.e. pre-Antelope) do not start
            # with a YYYY release year
            if not version_file.split('_', 2)[0].isnumeric():
                # File format should be release_phaseNN_description.py
                try:
                    _rest = ''  # noqa
                    release, phasever, _rest = version_file.split('_', 2)
                except ValueError:
                    release = phasever = ''
                # Grab the non-numeric part of phaseNN
                phase = ''.join(x for x in phasever if x.isalpha())
                if phase not in required_phases:
                    # Help make sure that going forward developers stick to
                    # the consistent format.
                    self.fail('Migration files should be in the form of: '
                              'release_phaseNN_some_description.py '
                              '(while processing %r)' % version_file)
                releases[release].add(phase)
            else:
                # For new database scripts, i.e. Antelope onwards, the file
                # format should be
                # releaseYear_releaseN_phaseNN_description.py
                # For example 2023_1_expand01_empty.py
                try:
                    _rest = ''  # noqa
                    release_y, release_n, phasever, _rest = \
                        version_file.split('_', 3)
                except ValueError:
                    release_y = phasever = ''
                # Grab the non-numeric part of phaseNN
                phase = ''.join(x for x in phasever if x.isalpha())
                if phase not in required_phases:
                    # Help make sure that going forward developers stick to
                    # the consistent format.
self.fail('Migration files should be in the form of: ' 'releaseYear_releaseN_phaseNN_description.py ' '(while processing %r)' % version_file) releases[release_y].add(phase) for release, phases in releases.items(): missing = required_phases - phases if missing: self.fail('Release %s missing migration phases %s' % ( release, ','.join(missing))) class AlembicMigrationsMixin(object): def setUp(self): super(AlembicMigrationsMixin, self).setUp() self.engine = enginefacade.writer.get_engine() def _get_revisions(self, config, head=None): head = head or 'heads' scripts_dir = alembic_script.ScriptDirectory.from_config(config) revisions = list(scripts_dir.walk_revisions(base='base', head=head)) revisions = list(reversed(revisions)) revisions = [rev.revision for rev in revisions] return revisions def _migrate_up(self, config, engine, revision, with_data=False): if with_data: data = None pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None) if pre_upgrade: data = pre_upgrade(engine) alembic_command.upgrade(config, revision) if with_data: check = getattr(self, '_check_%s' % revision, None) if check: check(engine, data) def test_walk_versions(self): alembic_config = alembic_migrations.get_alembic_config(self.engine) for revision in self._get_revisions(alembic_config): self._migrate_up(alembic_config, self.engine, revision, with_data=True) class TestMysqlMigrations(test_fixtures.OpportunisticDBTestMixin, AlembicMigrationsMixin, test_utils.BaseTestCase): FIXTURE = test_fixtures.MySQLOpportunisticFixture def test_mysql_innodb_tables(self): test_utils.db_sync(engine=self.engine) with self.engine.connect() as conn: total = conn.execute( sql.text( "SELECT COUNT(*) " "FROM information_schema.TABLES " "WHERE TABLE_SCHEMA=:database" ), {'database': self.engine.url.database}, ) self.assertGreater(total.scalar(), 0, "No tables found. Wrong schema?") with self.engine.connect() as conn: noninnodb = conn.execute( sql.text( "SELECT count(*) " "FROM information_schema.TABLES " "WHERE TABLE_SCHEMA=:database " "AND ENGINE!='InnoDB' " "AND TABLE_NAME!='migrate_version'" ), {'database': self.engine.url.database}, ) count = noninnodb.scalar() self.assertEqual(0, count, "%d non InnoDB tables created" % count) class TestPostgresqlMigrations(test_fixtures.OpportunisticDBTestMixin, AlembicMigrationsMixin, test_utils.BaseTestCase): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture class TestSqliteMigrations(test_fixtures.OpportunisticDBTestMixin, AlembicMigrationsMixin, test_utils.BaseTestCase): pass class TestMigrations(test_fixtures.OpportunisticDBTestMixin, test_utils.BaseTestCase): def test_no_downgrade(self): migrate_file = versions.__path__[0] for parent, dirnames, filenames in os.walk(migrate_file): for filename in filenames: if filename.split('.')[1] == 'py': model_name = filename.split('.')[0] model = __import__( 'glance.db.sqlalchemy.alembic_migrations.versions.' 
                        + model_name)
                    obj = getattr(getattr(getattr(getattr(getattr(
                        model, 'db'), 'sqlalchemy'), 'alembic_migrations'),
                        'versions'), model_name)
                    func = getattr(obj, 'downgrade', None)
                    self.assertIsNone(func)


class ModelsMigrationSyncMixin(object):

    def setUp(self):
        super(ModelsMigrationSyncMixin, self).setUp()
        self.engine = enginefacade.writer.get_engine()

    def get_metadata(self):
        for table in models_metadef.BASE_DICT.metadata.sorted_tables:
            models.BASE.metadata._add_table(table.name, table.schema, table)
        return models.BASE.metadata

    def get_engine(self):
        return self.engine

    def db_sync(self, engine):
        test_utils.db_sync(engine=engine)

    # TODO(akamyshikova): remove this method as soon as comparison with
    # Variant will be implemented in oslo.db or alembic
    def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type):
        if isinstance(meta_type, types.Variant):
            meta_orig_type = meta_col.type
            insp_orig_type = insp_col.type
            meta_col.type = meta_type.impl
            insp_col.type = meta_type.impl
            try:
                return self.compare_type(ctxt, insp_col, meta_col, insp_type,
                                         meta_type.impl)
            finally:
                meta_col.type = meta_orig_type
                insp_col.type = insp_orig_type
        else:
            ret = super(ModelsMigrationSyncMixin, self).compare_type(
                ctxt, insp_col, meta_col, insp_type, meta_type)
            if ret is not None:
                return ret
            return ctxt.impl.compare_type(insp_col, meta_col)

    def include_object(self, object_, name, type_, reflected, compare_to):
        if name in ['migrate_version'] and type_ == 'table':
            return False
        return True


class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
                                test_migrations.ModelsMigrationsSync,
                                test_fixtures.OpportunisticDBTestMixin,
                                test_utils.BaseTestCase):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
                                   test_migrations.ModelsMigrationsSync,
                                   test_fixtures.OpportunisticDBTestMixin,
                                   test_utils.BaseTestCase):
    FIXTURE = test_fixtures.PostgresqlOpportunisticFixture


class ModelsMigrationsSyncSqlite(ModelsMigrationSyncMixin,
                                 test_migrations.ModelsMigrationsSync,
                                 test_fixtures.OpportunisticDBTestMixin,
                                 test_utils.BaseTestCase):
    pass


---- glance-29.0.0/glance/tests/functional/db/test_sqlalchemy.py ----

# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
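# The fixtures below point oslo.db at an in-memory SQLite database and then
# re-create the model tables around every test (see get_db/reset_db). As a
# self-contained illustration of that reset pattern using plain SQLAlchemy
# (the Base and Image names here are hypothetical stand-ins, not Glance's
# real models):
import sqlalchemy as sa
from sqlalchemy import orm

Base = orm.declarative_base()


class Image(Base):
    __tablename__ = 'images'
    id = sa.Column(sa.String(36), primary_key=True)


engine = sa.create_engine('sqlite://')


def reset(engine):
    # mirror of reset_db(): drop everything, then build a fresh schema
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)


reset(engine)
with orm.Session(engine) as session:
    session.add(Image(id='343f9ba5-0197-41be-9543-16bbb32e12aa'))
    session.commit()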
from oslo_config import cfg from oslo_db import options from oslo_utils.fixture import uuidsentinel as uuids from glance.common import exception from glance import context as glance_context import glance.db.sqlalchemy.api from glance.db.sqlalchemy import models as db_models from glance.db.sqlalchemy import models_metadef as metadef_models import glance.tests.functional.db as db_tests from glance.tests.functional.db import base from glance.tests.functional.db import base_metadef CONF = cfg.CONF def get_db(config): options.set_defaults(CONF, connection='sqlite://') config(debug=False) db_api = glance.db.sqlalchemy.api return db_api def reset_db(db_api): db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def reset_db_metadef(db_api): metadef_models.unregister_models(db_api.get_engine()) metadef_models.register_models(db_api.get_engine()) class TestSqlAlchemyDriver(base.TestDriver, base.DriverTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyDriver, self).setUp() self.addCleanup(db_tests.reset) def test_get_image_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.image_get, self.context, image_id) def test_image_tag_delete_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.image_tag_delete, self.context, image_id, 'fake') def test_image_tag_get_all_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.image_tag_get_all, self.context, image_id) def test_user_get_storage_usage_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.user_get_storage_usage, self.context, 'fake_owner_id', image_id) class TestSqlAlchemyVisibility(base.TestVisibility, base.VisibilityTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyVisibility, self).setUp() self.addCleanup(db_tests.reset) class TestSqlAlchemyMembershipVisibility(base.TestMembershipVisibility, base.MembershipVisibilityTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyMembershipVisibility, self).setUp() self.addCleanup(db_tests.reset) class TestSqlAlchemyDBDataIntegrity(base.TestDriver, base.FunctionalInitWrapper): """Test class for checking the data integrity in the database. Helpful in testing scenarios specific to the sqlalchemy api. 
""" def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyDBDataIntegrity, self).setUp() self.addCleanup(db_tests.reset) def test_paginate_redundant_sort_keys(self): original_method = self.db_api._paginate_query def fake_paginate_query(query, model, limit, sort_keys, marker, sort_dir, sort_dirs): self.assertEqual(['created_at', 'id'], sort_keys) return original_method(query, model, limit, sort_keys, marker, sort_dir, sort_dirs) self.mock_object(self.db_api, '_paginate_query', fake_paginate_query) self.db_api.image_get_all(self.context, sort_key=['created_at']) def test_paginate_non_redundant_sort_keys(self): original_method = self.db_api._paginate_query def fake_paginate_query(query, model, limit, sort_keys, marker, sort_dir, sort_dirs): self.assertEqual(['name', 'created_at', 'id'], sort_keys) return original_method(query, model, limit, sort_keys, marker, sort_dir, sort_dirs) self.mock_object(self.db_api, '_paginate_query', fake_paginate_query) self.db_api.image_get_all(self.context, sort_key=['name']) class TestSqlAlchemyTask(base.TaskTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyTask, self).setUp() self.addCleanup(db_tests.reset) class TestSqlAlchemyQuota(base.DriverQuotaTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyQuota, self).setUp() self.addCleanup(db_tests.reset) class TestDBPurge(base.DBPurgeTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestDBPurge, self).setUp() self.addCleanup(db_tests.reset) class TestMetadefSqlAlchemyDriver(base_metadef.TestMetadefDriver, base_metadef.MetadefDriverTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db_metadef) super(TestMetadefSqlAlchemyDriver, self).setUp() self.addCleanup(db_tests.reset) class TestImageCacheOperations(base.TestDriver, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestImageCacheOperations, self).setUp() self.addCleanup(db_tests.reset) # Create two images self.images = [] for num in range(0, 2): size = 100 image = self.db_api.image_create( self.adm_context, {'status': 'active', 'owner': self.adm_context.owner, 'size': size, 'name': 'test-%s-%i' % ('active', num)}) self.images.append(image) # Create two node_references self.node_references = [ self.db_api.node_reference_create( self.adm_context, 'node_url_1'), self.db_api.node_reference_create( self.adm_context, 'node_url_2'), ] # Cache two images on node_url_1 for node in self.node_references: if node['node_reference_url'] == 'node_url_2': continue for image in self.images: self.db_api.insert_cache_details( self.adm_context, 'node_url_1', image['id'], image['size'], hits=3) def test_node_reference_get_by_url(self): node_reference = self.db_api.node_reference_get_by_url( self.adm_context, 'node_url_1') self.assertEqual('node_url_1', node_reference['node_reference_url']) def test_node_reference_get_by_url_not_found(self): self.assertRaises(exception.NotFound, self.db_api.node_reference_get_by_url, self.adm_context, 'garbage_url') def test_get_cached_images(self): # Two images are cached on node 'node_url_1' cached_images = self.db_api.get_cached_images( self.adm_context, 'node_url_1') self.assertEqual(2, len(cached_images)) # Nothing is cached on node 'node_url_2' cached_images = self.db_api.get_cached_images( self.adm_context, 'node_url_2') self.assertEqual(0, len(cached_images)) def test_get_hit_count(self): # Hit count will be 3 for 
image on node_url_1 hit_count = self.db_api.get_hit_count( self.adm_context, self.images[0]['id'], 'node_url_1') self.assertEqual(3, hit_count) # Hit count will be 0 for image on node_url_2 hit_count = self.db_api.get_hit_count( self.adm_context, self.images[0]['id'], 'node_url_2') self.assertEqual(0, hit_count) def test_delete_all_cached_images(self): # delete all images from node_url_1 self.db_api.delete_all_cached_images( self.adm_context, 'node_url_1') # Verify all images are deleted cached_images = self.db_api.get_cached_images( self.adm_context, 'node_url_1') self.assertEqual(0, len(cached_images)) def test_delete_cached_image(self): # Delete cached image from node_url_1 self.db_api.delete_cached_image( self.adm_context, self.images[0]['id'], 'node_url_1') # verify that image is deleted self.assertFalse(self.db_api.is_image_cached_for_node( self.adm_context, 'node_url_1', self.images[0]['id'])) def test_get_least_recently_accessed(self): recently_accessed = self.db_api.get_least_recently_accessed( self.adm_context, 'node_url_1') # Verify we get last cached image in response self.assertEqual(self.images[0]['id'], recently_accessed) def test_is_image_cached_for_node(self): # Verify image is cached for node_url_1 self.assertTrue(self.db_api.is_image_cached_for_node( self.adm_context, 'node_url_1', self.images[0]['id'])) # Verify image is not cached for node_url_2 self.assertFalse(self.db_api.is_image_cached_for_node( self.adm_context, 'node_url_2', self.images[0]['id'])) def test_update_hit_count(self): # Verify image on node_url_1 has 3 as hit count hit_count = self.db_api.get_hit_count( self.adm_context, self.images[0]['id'], 'node_url_1') self.assertEqual(3, hit_count) # Update the hit count of UUID1 self.db_api.update_hit_count( self.adm_context, self.images[0]['id'], 'node_url_1') # Verify hit count is now 4 hit_count = self.db_api.get_hit_count( self.adm_context, self.images[0]['id'], 'node_url_1') self.assertEqual(4, hit_count) class TestImageAtomicOps(base.TestDriver, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestImageAtomicOps, self).setUp() self.addCleanup(db_tests.reset) self.image = self.db_api.image_create( self.adm_context, {'status': 'active', 'owner': self.adm_context.owner, 'properties': {'speed': '88mph'}}) @staticmethod def _propdict(list_of_props): """ Convert a list of ImageProperty objects to dict, ignoring deleted values. """ return {x.name: x.value for x in list_of_props if x.deleted == 0} def assertOnlyImageHasProp(self, image_id, name, value): images_with_prop = self.db_api.image_get_all( self.adm_context, {'properties': {name: value}}) self.assertEqual(1, len(images_with_prop)) self.assertEqual(image_id, images_with_prop[0]['id']) def test_update(self): """Try to double-create a property atomically. This should ensure that a second attempt to create the property atomically fails with Duplicate. 
""" # Atomically create the property self.db_api.image_set_property_atomic(self.image['id'], 'test_property', 'foo') # Make sure only the matched image got it self.assertOnlyImageHasProp(self.image['id'], 'test_property', 'foo') # Trying again should fail self.assertRaises(exception.Duplicate, self.db_api.image_set_property_atomic, self.image['id'], 'test_property', 'bar') # Ensure that only the first one stuck image = self.db_api.image_get(self.adm_context, self.image['id']) self.assertEqual({'speed': '88mph', 'test_property': 'foo'}, self._propdict(image['properties'])) self.assertOnlyImageHasProp(self.image['id'], 'test_property', 'foo') def test_update_drop_update(self): """Try to create, delete, re-create property atomically. If we fail to undelete and claim the property, this will fail as duplicate. """ # Atomically create the property self.db_api.image_set_property_atomic(self.image['id'], 'test_property', 'foo') # Ensure that it stuck image = self.db_api.image_get(self.adm_context, self.image['id']) self.assertEqual({'speed': '88mph', 'test_property': 'foo'}, self._propdict(image['properties'])) self.assertOnlyImageHasProp(self.image['id'], 'test_property', 'foo') # Update the image with the property removed, like image_repo.save() new_props = self._propdict(image['properties']) del new_props['test_property'] self.db_api.image_update(self.adm_context, self.image['id'], values={'properties': new_props}, purge_props=True) # Make sure that a fetch shows the property deleted image = self.db_api.image_get(self.adm_context, self.image['id']) self.assertEqual({'speed': '88mph'}, self._propdict(image['properties'])) # Atomically update the property, which still exists, but is # deleted self.db_api.image_set_property_atomic(self.image['id'], 'test_property', 'bar') # Makes sure we updated the property and undeleted it image = self.db_api.image_get(self.adm_context, self.image['id']) self.assertEqual({'speed': '88mph', 'test_property': 'bar'}, self._propdict(image['properties'])) self.assertOnlyImageHasProp(self.image['id'], 'test_property', 'bar') def test_update_prop_multiple_images(self): """Create and delete properties on two images, then set on one. This tests that the resurrect-from-deleted mode of the method only matches deleted properties from our image. """ images = self.db_api.image_get_all(self.adm_context) image_id1 = images[0]['id'] image_id2 = images[-1]['id'] # Atomically create the property on each image self.db_api.image_set_property_atomic(image_id1, 'test_property', 'foo') self.db_api.image_set_property_atomic(image_id2, 'test_property', 'bar') # Make sure they got the right property value each self.assertOnlyImageHasProp(image_id1, 'test_property', 'foo') self.assertOnlyImageHasProp(image_id2, 'test_property', 'bar') # Delete the property on both images self.db_api.image_update(self.adm_context, image_id1, {'properties': {}}, purge_props=True) self.db_api.image_update(self.adm_context, image_id2, {'properties': {}}, purge_props=True) # Set the property value on one of the images. Both will have a # deleted previous value for the property, but only one should # be updated self.db_api.image_set_property_atomic(image_id2, 'test_property', 'baz') # Make sure the update affected only the intended image self.assertOnlyImageHasProp(image_id2, 'test_property', 'baz') def test_delete(self): """Try to double-delete a property atomically. This should ensure that a second attempt fails. 
""" self.db_api.image_delete_property_atomic(self.image['id'], 'speed', '88mph') self.assertRaises(exception.NotFound, self.db_api.image_delete_property_atomic, self.image['id'], 'speed', '88mph') def test_delete_create_delete(self): """Try to delete, re-create, and then re-delete property.""" self.db_api.image_delete_property_atomic(self.image['id'], 'speed', '88mph') self.db_api.image_update(self.adm_context, self.image['id'], {'properties': {'speed': '89mph'}}, purge_props=True) # We should no longer be able to delete the property by the *old* # value self.assertRaises(exception.NotFound, self.db_api.image_delete_property_atomic, self.image['id'], 'speed', '88mph') # Only the new value should result in proper deletion self.db_api.image_delete_property_atomic(self.image['id'], 'speed', '89mph') def test_image_update_ignores_atomics(self): image = self.db_api.image_get_all(self.adm_context)[0] # Set two atomic properties atomically self.db_api.image_set_property_atomic(image['id'], 'test1', 'foo') self.db_api.image_set_property_atomic(image['id'], 'test2', 'bar') # Try to change test1, delete test2, add test3 and test4 via # normal image_update() where the first three are passed as # atomic self.db_api.image_update( self.adm_context, image['id'], {'properties': {'test1': 'baz', 'test3': 'bat', 'test4': 'yep'}}, purge_props=True, atomic_props=['test1', 'test2', 'test3']) # Expect that none of the updates to the atomics are applied, but # the regular property is added. image = self.db_api.image_get(self.adm_context, image['id']) self.assertEqual({'test1': 'foo', 'test2': 'bar', 'test4': 'yep'}, self._propdict(image['properties'])) class TestImageStorageUsage(base.TestDriver, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestImageStorageUsage, self).setUp() self.addCleanup(db_tests.reset) self.contexts = {} for owner in (uuids.owner1, uuids.owner2): ctxt = glance_context.RequestContext(project_id=owner) self.contexts[owner] = ctxt statuses = ['queued', 'active', 'uploading', 'importing', 'deleted'] for status in statuses: for num in range(0, 2): # Make the size of each image differ by status # so we can make sure we count the right one. size = statuses.index(status) * 100 image = self.db_api.image_create( ctxt, {'status': status, 'owner': owner, 'size': size, 'name': 'test-%s-%i' % (status, num)}) if status == 'active': # Active images get one location, active if they # are the first. The first image is also copying # to another store. loc_status = num == 0 and 'active' or 'deleted' self.db_api.image_location_add( ctxt, image['id'], {'url': 'foo://bar', 'metadata': {}, 'status': loc_status}) self.db_api.image_set_property_atomic( image['id'], 'os_glance_importing_to_stores', num == 0 and 'fakestore' or '') def test_get_staging_usage(self): for owner, ctxt in self.contexts.items(): usage = self.db_api.user_get_staging_usage(ctxt, ctxt.owner) # Each user has two staged images of size 200 each, plus one # active image of size 100 that is copying, and two importing # of size 300. self.assertEqual(1100, usage) def test_get_storage_usage(self): for owner, ctxt in self.contexts.items(): usage = self.db_api.user_get_storage_usage(ctxt, ctxt.owner) # Each user has two active images of size 100 each, but only one # has an active location. 
            self.assertEqual(100, usage)

    def test_get_image_count(self):
        for owner, ctxt in self.contexts.items():
            count = self.db_api.user_get_image_count(ctxt, ctxt.owner)
            # Each user has two active images, two staged images, two
            # importing, and two queued images
            self.assertEqual(8, count)

    def test_get_uploading_count(self):
        for owner, ctxt in self.contexts.items():
            count = self.db_api.user_get_uploading_count(ctxt, ctxt.owner)
            # Each user has two staged images, one image being copied,
            # and two importing.
            self.assertEqual(5, count)


---- glance-29.0.0/glance/tests/functional/ft_utils.py ----

# Copyright 2018 Verizon Wireless
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import http.client as http
import time

from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests

LOG = logging.getLogger(__name__)


def verify_image_hashes_and_status(
        test_obj, image_id, checksum=None, os_hash_value=None, status=None,
        os_hash_algo='sha512', size=None):
    """Makes image-detail request and checks response.

    :param test_obj: The test object; expected to have _url() and
                     _headers() defined on it
    :param image_id: Image id to use in the request
    :param checksum: Expected checksum (default: None)
    :param os_hash_value: Expected multihash value (default: None)
    :param status: Expected status (default: None)
    :param os_hash_algo: Expected value of os_hash_algo; only checked when
                         os_hash_value is not None (default: 'sha512')
    :param size: Expected size (default: None)
    """
    path = test_obj._url('/v2/images/%s' % image_id)
    response = requests.get(path, headers=test_obj._headers())
    test_obj.assertEqual(http.OK, response.status_code)
    image = jsonutils.loads(response.text)
    test_obj.assertEqual(checksum, image['checksum'])
    if os_hash_value:
        # make sure we're using the hashing_algorithm we expect
        test_obj.assertEqual(str(os_hash_algo), image['os_hash_algo'])
    test_obj.assertEqual(os_hash_value, image['os_hash_value'])
    test_obj.assertEqual(status, image['status'])
    test_obj.assertEqual(size, image['size'])


def wait_for_status(test_obj, request_path, request_headers, status=None,
                    max_sec=10, delay_sec=0.2, start_delay_sec=None,
                    multistore=False):
    """
    Performs a time-bounded wait for the entity at the request_path to
    reach the requested status.
    :param test_obj: The test object; expected to have _url() and
                     _headers() defined on it
    :param request_path: path to use to make the request
    :param request_headers: headers to use when making the request
    :param status: the status to wait for (default: None)
    :param max_sec: the maximum number of seconds to wait (default: 10)
    :param delay_sec: seconds to sleep before the next request is made
                      (default: 0.2)
    :param start_delay_sec: seconds to wait before making the first request
                            (default: None)
    :param multistore: Optional flag; set if multiple backends are enabled
                       (default: False)
    :raises Exception: if the entity fails to reach the status within
                       the requested time or if the server returns something
                       other than a 200 response
    """
    start_time = time.time()
    done_time = start_time + max_sec
    if start_delay_sec:
        time.sleep(start_delay_sec)
    while time.time() <= done_time:
        if multistore:
            resp = test_obj.api_get(request_path, headers=request_headers)
        else:
            resp = requests.get(request_path, headers=request_headers)
        if resp.status_code != http.OK:
            raise Exception("Received {} response from server".format(
                resp.status_code))
        test_obj.assertEqual(http.OK, resp.status_code)
        entity = jsonutils.loads(resp.text)
        LOG.info('Image status is: %s', entity['status'])
        if entity['checksum'] and entity['status'] == 'active':
            return
        if entity['status'] == status:
            return
        time.sleep(delay_sec)
    entity_id = request_path.rsplit('/', 1)[1]
    msg = "Entity {0} failed to reach status '{1}' within {2} sec"
    raise Exception(msg.format(entity_id, status, max_sec))


def wait_for_image_checksum_and_status(test_obj, image_id, status=None,
                                       max_sec=10, delay_sec=0.2,
                                       start_delay_sec=None,
                                       multistore=False):
    """
    Performs a time-bounded wait until the image's hash and checksum
    calculation has completed and the image has reached the given status.

    :param test_obj: The test object; expected to have _url() and
                     _headers() defined on it
    :param image_id: Image id to use in the request
    :param status: Expected status (default: None)
    :param max_sec: the maximum number of seconds to wait (default: 10)
    :param delay_sec: seconds to sleep before the next request is made
                      (default: 0.2)
    :param start_delay_sec: seconds to wait before making the first request
                            (default: None)
    :param multistore: Optional flag; set if multiple backends are enabled
                       (default: False)
    """
    if multistore:
        path = '/v2/images/%s' % image_id
    else:
        path = test_obj._url('/v2/images/%s' % image_id)
    start_time = time.time()
    done_time = start_time + max_sec
    if start_delay_sec:
        time.sleep(start_delay_sec)
    while time.time() <= done_time:
        if multistore:
            resp = test_obj.api_get(path, headers=test_obj._headers())
        else:
            resp = requests.get(path, headers=test_obj._headers())
        test_obj.assertEqual(http.OK, resp.status_code)
        image = jsonutils.loads(resp.text)
        LOG.info('Image status is: %s', image['status'])
        if image['checksum'] and image['status'] == status:
            return
        time.sleep(delay_sec)
    if image['checksum'] is None:
        msg = ("Entity {0} failed to complete hash calculation "
               "within {1} sec")
        raise Exception(msg.format(image_id, max_sec))
    if image['status'] != status:
        msg = "Entity {0} failed to reach status '{1}' within {2} sec"
        raise Exception(msg.format(image_id, status, max_sec))


def wait_for_copying(request_path, request_headers, stores=[], max_sec=10,
                     delay_sec=0.2, start_delay_sec=None,
                     failure_scenario=False):
    """
    Performs a time-bounded wait for the image at the request_path to
    be copied to the specified stores.
    :param request_path: path to use to make the request
    :param request_headers: headers to use when making the request
    :param stores: list of stores to copy
    :param max_sec: the maximum number of seconds to wait (default: 10)
    :param delay_sec: seconds to sleep before the next request is made
                      (default: 0.2)
    :param start_delay_sec: seconds to wait before making the first request
                            (default: None)
    :param failure_scenario: if True, do not raise when the image fails to
                             reach the stores in time (default: False)
    :raises Exception: if the entity fails to reach the status within
                       the requested time or if the server returns something
                       other than a 200 response
    """
    start_time = time.time()
    done_time = start_time + max_sec
    if start_delay_sec:
        time.sleep(start_delay_sec)
    while time.time() <= done_time:
        resp = requests.get(request_path, headers=request_headers)
        if resp.status_code != http.OK:
            raise Exception("Received {} response from server".format(
                resp.status_code))
        entity = jsonutils.loads(resp.text)
        all_copied = all([store in entity['stores'] for store in stores])
        if all_copied:
            return
        time.sleep(delay_sec)

    if not failure_scenario:
        entity_id = request_path.rsplit('/', 1)[1]
        msg = "Entity {0} failed to copy image to stores '{1}' within {2} sec"
        raise Exception(msg.format(entity_id, ",".join(stores), max_sec))


def poll_entity(url, headers, callback, max_sec=10, delay_sec=0.2,
                require_success=True):
    """Poll a given URL passing the parsed entity to a callback.

    This is a utility method that repeatedly GETs a URL, and calls a
    callback with the result. The callback determines if we should keep
    polling by returning True (up to the timeout).

    :param url: The url to fetch
    :param headers: The request headers to use for the fetch
    :param callback: A function that takes the parsed entity and is expected
                     to return True if we should keep polling
    :param max_sec: The overall timeout before we fail
    :param delay_sec: The time between fetches
    :param require_success: Assert resp_code is http.OK each time before
                            calling the callback
    """
    timer = timeutils.StopWatch(max_sec)
    timer.start()
    while not timer.expired():
        resp = requests.get(url, headers=headers)
        if require_success and resp.status_code != http.OK:
            raise Exception(
                'Received %i response from server' % resp.status_code)
        entity = resp.json()
        keep_polling = callback(entity)
        if keep_polling is not True:
            return keep_polling
        time.sleep(delay_sec)
    raise Exception('Poll timeout of %i seconds exceeded!' % max_sec)
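# For illustration only (not part of the original file): a minimal usage
# sketch of poll_entity(). The URL and headers below are hypothetical; the
# callback returns True to keep polling and any other value to stop polling
# and have that value returned.
def _until_active(entity):
    # keep polling while the image is not yet active
    return True if entity['status'] != 'active' else entity

# image = poll_entity('http://localhost/v2/images/some-image-id',
#                     {'X-Auth-Token': 'a-token'}, _until_active, max_sec=30)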
---- glance-29.0.0/glance/tests/functional/image_cache/ (directory) ----
---- glance-29.0.0/glance/tests/functional/image_cache/__init__.py (empty) ----
---- glance-29.0.0/glance/tests/functional/image_cache/drivers/ (directory) ----
---- glance-29.0.0/glance/tests/functional/image_cache/drivers/__init__.py (empty) ----

---- glance-29.0.0/glance/tests/functional/image_cache/drivers/test_centralized_db.py ----

# Copyright 2024 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
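# The centralized_db cache driver keeps cache state in the Glance database,
# keyed by a per-worker node reference (worker_self_reference_url), instead
# of on local disk. As a rough illustrative model of the bookkeeping the
# tests below exercise (plain Python, hypothetical helper names mirroring
# the db_api calls insert_cache_details/get_hit_count/update_hit_count seen
# earlier; no real database involved):
cached = {}   # (node_url, image_id) -> {'size': int, 'hits': int}


def insert_cache_details(node_url, image_id, size, hits=0):
    cached[(node_url, image_id)] = {'size': size, 'hits': hits}


def get_hit_count(node_url, image_id):
    entry = cached.get((node_url, image_id))
    return entry['hits'] if entry else 0


def update_hit_count(node_url, image_id):
    cached[(node_url, image_id)]['hits'] += 1


insert_cache_details('http://workerx', 'image-1', size=9)
update_hit_count('http://workerx', 'image-1')   # e.g. one download from cache
assert get_hit_count('http://workerx', 'image-1') == 1
assert get_hit_count('http://workery', 'image-1') == 0  # not cached there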
import datetime import errno import io import os import time from unittest import mock from oslo_utils import fileutils from glance.image_cache.drivers import centralized_db from glance.tests import functional DATA = b'IMAGEDATA' class TestCentralizedDb(functional.SynchronousAPIBase): # ToDo(abhishekk): Once system scope is enabled and RBAC is fully # supported, enable these tests for RBAC as well def setUp(self): super(TestCentralizedDb, self).setUp() def start_server(self, enable_cache=True, set_worker_url=True): if set_worker_url: self.config(worker_self_reference_url='http://workerx') self.config(image_cache_driver='centralized_db') super(TestCentralizedDb, self).start_server(enable_cache=enable_cache) def load_data(self): output = {} # Create 1 queued image as well for testing path = "/v2/images" data = { 'name': 'queued-image', 'container_format': 'bare', 'disk_format': 'raw' } response = self.api_post(path, json=data) self.assertEqual(201, response.status_code) image_id = response.json['id'] output['queued'] = image_id for visibility in ['public', 'private']: data = { 'name': '%s-image' % visibility, 'visibility': visibility, 'container_format': 'bare', 'disk_format': 'raw' } response = self.api_post(path, json=data) self.assertEqual(201, response.status_code) image_id = response.json['id'] # Upload some data to image response = self.api_put( '/v2/images/%s/file' % image_id, headers={'Content-Type': 'application/octet-stream'}, data=DATA) self.assertEqual(204, response.status_code) output[visibility] = image_id return output def wait_for_caching(self, image_id, max_sec=10, delay_sec=0.2, start_delay_sec=None): start_time = time.time() done_time = start_time + max_sec if start_delay_sec: time.sleep(start_delay_sec) while time.time() <= done_time: output = self.list_cache()['cached_images'] output = [image['image_id'] for image in output] if output and image_id in output: return time.sleep(delay_sec) msg = "Image {0} failed to cached within {1} sec" raise Exception(msg.format(image_id, max_sec)) def list_cache(self, expected_code=200): path = '/v2/cache' response = self.api_get(path) self.assertEqual(expected_code, response.status_code) if response.status_code == 200: return response.json def test_centralized_db_worker_url_not_set(self): try: self.config(image_cache_driver='centralized_db') self.start_server(enable_cache=True, set_worker_url=False) except RuntimeError as e: expected_message = "'worker_self_reference_url' needs to be set " \ "if `centralized_db` is defined as cache " \ "driver for image_cache_driver config option." self.assertIn(expected_message, e.args) def test_centralized_db_verify_worker_node_is_set(self): self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.assertEqual( 'http://workerx', self.driver.db_api.node_reference_get_by_url( self.driver.context, 'http://workerx').node_reference_url) def test_get_cache_size(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify initially cache size is 0 self.assertEqual(0, self.driver.get_cache_size()) # Cache one image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # Verify cache size is equal to len(DATA) i.e. 
9 self.assertEqual(len(DATA), self.driver.get_cache_size()) def test_get_hit_count(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify that hit count is currently 0 as no image is cached self.assertEqual(0, self.driver.get_hit_count(images['public'])) # Cache one image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) # Verify that hit count is still 0 as image is cached, but # not downloaded yet from cache self.assertEqual(0, self.driver.get_hit_count(images['public'])) # Download the image path = '/v2/images/%s/file' % images['public'] response = self.api_get(path) self.assertEqual('IMAGEDATA', response.text) # Verify that hit count is 1 as we hit the cache self.assertEqual(1, self.driver.get_hit_count(images['public'])) # Download the image again path = '/v2/images/%s/file' % images['public'] response = self.api_get(path) self.assertEqual('IMAGEDATA', response.text) # Verify that hit count is 2 as we hit the cache self.assertEqual(2, self.driver.get_hit_count(images['public'])) def test_get_cached_images(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify that initially there are no cached image(s) self.assertEqual(0, len(self.driver.get_cached_images())) # Cache one image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) # Verify that there is one cached image now self.assertEqual(1, len(self.driver.get_cached_images())) # Verify that passing a non-existing node will return 0 cached images self.config(worker_self_reference_url="http://fake-worker") self.assertEqual(0, len(self.driver.get_cached_images())) def test_is_cacheable(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify that is_cacheable will return true as image is not cached yet self.assertTrue(self.driver.is_cacheable(images['public'])) # Now cache the image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) # Verify that now above image is not cacheable self.assertFalse(self.driver.is_cacheable(images['public'])) def test_is_being_cached(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify that is_being_cached will return False as # image is not cached yet self.assertFalse(self.driver.is_being_cached(images['public'])) def test_is_queued(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify that is_queued will return False as # image is not queued for caching yet self.assertFalse(self.driver.is_queued(images['public'])) # Now queue image for caching path = '/v2/cache/%s' % images['public'] self.api_put(path) self.assertTrue(self.driver.is_queued(images['public'])) def test_delete_cached_image(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify deleting non-existing image from cache will not fail
self.driver.delete_cached_image('fake-image-id') # Now cache the image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) self.assertEqual(1, len(self.driver.get_cached_images())) # Delete the image from cache self.driver.delete_cached_image(images['public']) # Verify image is deleted from cache self.assertFalse(self.driver.is_cached(images['public'])) self.assertEqual(0, len(self.driver.get_cached_images())) def test_delete_all_cached_images(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify no image is cached yet self.assertEqual(0, len(self.driver.get_cached_images())) # Verify delete call should not fail even if no images are cached self.driver.delete_all_cached_images() # Now cache the image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) self.assertEqual(1, len(self.driver.get_cached_images())) # Now cache another image path = '/v2/cache/%s' % images['private'] self.api_put(path) self.wait_for_caching(images['private']) # verify image is cached self.assertTrue(self.driver.is_cached(images['private'])) self.assertEqual(2, len(self.driver.get_cached_images())) # Delete all the images from cache self.driver.delete_all_cached_images() # Verify images are deleted from cache self.assertEqual(0, len(self.driver.get_cached_images())) def test_delete_queued_image(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify deleting non-existing image from queued dir will not fail self.driver.delete_queued_image('fake-image-id') # Now queue image for caching path = '/v2/cache/%s' % images['public'] self.api_put(path) # verify image is queued self.assertTrue(self.driver.is_queued(images['public'])) self.assertEqual(1, len(self.driver.get_queued_images())) # Delete the image from queued dir self.driver.delete_queued_image(images['public']) # Verify image is deleted from queued dir self.assertFalse(self.driver.is_queued(images['public'])) self.assertEqual(0, len(self.driver.get_queued_images())) def test_delete_all_queued_images(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify no image is queued yet self.assertEqual(0, len(self.driver.get_queued_images())) # Verify delete call should not fail even if no images are queued self.driver.delete_all_queued_images() # Now queue the image path = '/v2/cache/%s' % images['public'] self.api_put(path) # verify image is queued self.assertTrue(self.driver.is_queued(images['public'])) self.assertEqual(1, len(self.driver.get_queued_images())) # Now queue another image path = '/v2/cache/%s' % images['private'] self.api_put(path) # verify image is queued self.assertTrue(self.driver.is_queued(images['private'])) self.assertEqual(2, len(self.driver.get_queued_images())) # Delete all the images from queued dir self.driver.delete_all_queued_images() # Verify images are deleted from queued dir self.assertEqual(0, len(self.driver.get_queued_images())) def test_clean(self): self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.driver.configure() cache_dir = os.path.join(self.test_dir, 'cache') incomplete_file_path =
os.path.join(cache_dir, 'incomplete', '1') incomplete_file = open(incomplete_file_path, 'wb') incomplete_file.write(DATA) incomplete_file.close() self.assertTrue(os.path.exists(incomplete_file_path)) self.delay_inaccurate_clock() self.driver.clean(stall_time=0) self.assertFalse(os.path.exists(incomplete_file_path)) def _test_clean_stall_time( self, stall_time=None, days=2, stall_failed=False): """ Test the clean method removes the stalled images as expected """ self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.driver.configure() cache_dir = os.path.join(self.test_dir, 'cache') incomplete_file_path_1 = os.path.join(cache_dir, 'incomplete', '1') incomplete_file_path_2 = os.path.join(cache_dir, 'incomplete', '2') for f in (incomplete_file_path_1, incomplete_file_path_2): incomplete_file = open(f, 'wb') incomplete_file.write(DATA) incomplete_file.close() mtime = os.path.getmtime(incomplete_file_path_1) pastday = (datetime.datetime.fromtimestamp(mtime) - datetime.timedelta(days=days)) atime = int(time.mktime(pastday.timetuple())) mtime = atime os.utime(incomplete_file_path_1, (atime, mtime)) self.assertTrue(os.path.exists(incomplete_file_path_1)) self.assertTrue(os.path.exists(incomplete_file_path_2)) # If stall_time is None then it will wait for default time # of `image_cache_stall_time` which is 24 hours if stall_failed: with mock.patch.object( fileutils, 'delete_if_exists') as mock_delete: mock_delete.side_effect = OSError(errno.ENOENT, '') self.driver.clean(stall_time=stall_time) self.assertTrue(os.path.exists(incomplete_file_path_1)) else: self.driver.clean(stall_time=stall_time) self.assertFalse(os.path.exists(incomplete_file_path_1)) self.assertTrue(os.path.exists(incomplete_file_path_2)) def test_clean_stalled_none_stall_time(self): self._test_clean_stall_time() def test_clean_stalled_nonzero_stall_time(self): """Test the clean method removes expected images.""" self._test_clean_stall_time(stall_time=3600, days=1) def test_clean_stalled_fails(self): """Test the clean method fails to delete file, ignores the failure""" self._test_clean_stall_time(stall_time=3600, days=1, stall_failed=True) def test_least_recently_accessed(self): self.start_server(enable_cache=True) images = self.load_data() self.driver = centralized_db.Driver() self.driver.configure() # Verify no image is cached yet self.assertEqual(0, len(self.driver.get_cached_images())) # Verify delete call should not fail even if no images are cached self.driver.delete_all_cached_images() # Now cache the image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) self.assertEqual(1, len(self.driver.get_cached_images())) # Now cache another image path = '/v2/cache/%s' % images['private'] self.api_put(path) self.wait_for_caching(images['private']) # verify image is cached self.assertTrue(self.driver.is_cached(images['private'])) self.assertEqual(2, len(self.driver.get_cached_images())) # Verify that 1st image will be returned image_id, size = self.driver.get_least_recently_accessed() self.assertEqual(images['public'], image_id) self.assertEqual(len(DATA), size) def test_open_for_write_good(self): """ Test to see if open_for_write works in normal case """ self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.driver.configure() # test a good case image_id = '1' self.assertFalse(self.driver.is_cached(image_id)) with self.driver.open_for_write(image_id) as 
cache_file: cache_file.write(b'a') self.assertTrue(self.driver.is_cached(image_id), "Image %s was NOT cached!" % image_id) # make sure it has tidied up cache_dir = os.path.join(self.test_dir, 'cache') incomplete_file_path = os.path.join(cache_dir, 'incomplete', image_id) cache_file_path = os.path.join(cache_dir, image_id) invalid_file_path = os.path.join(cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertFalse(os.path.exists(invalid_file_path)) self.assertTrue(os.path.exists(cache_file_path)) def test_open_for_write_with_exception(self): """ Test to see if open_for_write works in a failure case for each driver. This case is where an exception is raised while the file is being written. The image is partially filled in cache and filling won't resume, so verify the image is moved to the invalid/ directory """ # test a case where an exception is raised while the file is open self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.driver.configure() image_id = '1' self.assertFalse(self.driver.is_cached(image_id)) try: with self.driver.open_for_write(image_id): raise IOError except Exception as e: self.assertIsInstance(e, IOError) self.assertFalse(self.driver.is_cached(image_id), "Image %s was cached!" % image_id) # make sure it has tidied up cache_dir = os.path.join(self.test_dir, 'cache') incomplete_file_path = os.path.join(cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertTrue(os.path.exists(invalid_file_path)) def test_open_for_read_good(self): self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.driver.configure() images = self.load_data() self.assertFalse(self.driver.is_cached(images['public'])) # Cache one image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) # verify cache hit count for above image is 0 self.assertEqual(0, self.driver.get_hit_count(images['public'])) # Open image for read buff = io.BytesIO() with self.driver.open_for_read(images['public']) as cache_file: for chunk in cache_file: buff.write(chunk) self.assertEqual(DATA, buff.getvalue()) # verify now cache hit count for above image is 1 self.assertEqual(1, self.driver.get_hit_count(images['public'])) def test_open_for_read_with_exception(self): self.start_server(enable_cache=True) self.driver = centralized_db.Driver() self.driver.configure() images = self.load_data() self.assertFalse(self.driver.is_cached(images['public'])) # Cache one image path = '/v2/cache/%s' % images['public'] self.api_put(path) self.wait_for_caching(images['public']) # verify image is cached self.assertTrue(self.driver.is_cached(images['public'])) # verify cache hit count for above image is 0 self.assertEqual(0, self.driver.get_hit_count(images['public'])) # Open image for read buff = io.BytesIO() try: with self.driver.open_for_read(images['public']): raise IOError except Exception as e: self.assertIsInstance(e, IOError) self.assertEqual(b'', buff.getvalue()) # verify now cache hit count for above image is 1 even if an exception # is raised self.assertEqual(1, self.driver.get_hit_count(images['public'])) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8903067 glance-29.0.0/glance/tests/functional/serial/0000775000175000017500000000000000000000000021072
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/serial/__init__.py0000664000175000017500000000000000000000000023171 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/serial/test_scrubber.py0000664000175000017500000004061400000000000024317 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import os import sys import time import httplib2 from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils.fixture import uuidsentinel as uuids from glance import context import glance.db as db_api from glance.tests import functional from glance.tests.utils import execute CONF = cfg.CONF class TestScrubber(functional.FunctionalTest): """Test that delayed_delete works and the scrubber deletes""" def setUp(self): super(TestScrubber, self).setUp() self.api_server.deployment_flavor = 'noauth' self.admin_context = context.get_admin_context(show_deleted=True) CONF.set_override('connection', self.api_server.sql_connection, group='database') def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': uuids.TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def _send_create_image_http_request(self, path, body=None): headers = { "Content-Type": "application/json", "X-Roles": "admin", } body = body or {'container_format': 'ovf', 'disk_format': 'raw', 'name': 'test_image', 'visibility': 'public'} body = jsonutils.dumps(body) return httplib2.Http().request(path, 'POST', body, self._headers(headers)) def _send_upload_image_http_request(self, path, body=None): headers = { "Content-Type": "application/octet-stream" } return httplib2.Http().request(path, 'PUT', body, self._headers(headers)) def _send_http_request(self, path, method): headers = { "Content-Type": "application/json" } return httplib2.Http().request(path, method, None, self._headers(headers)) def _get_pending_delete_image(self, image_id): # In Glance V2, there is no way to get the 'pending_delete' image from # API. So we get the image from db here for testing. # Clean the session cache first to avoid connecting to the old db data. 
db_api.get_api()._FACADE = None image = db_api.get_api().image_get(self.admin_context, image_id) return image def test_delayed_delete(self): """ test that images don't get deleted immediately and that the scrubber scrubs them """ self.cleanup() kwargs = self.__dict__.copy() self.start_servers(delayed_delete=True, daemon=True, metadata_encryption_key='', **kwargs) path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) response, content = self._send_create_image_http_request(path) self.assertEqual(http.client.CREATED, response.status) image = jsonutils.loads(content) self.assertEqual('queued', image['status']) file_path = "%s/%s/file" % (path, image['id']) response, content = self._send_upload_image_http_request(file_path, body='XXX') self.assertEqual(http.client.NO_CONTENT, response.status) path = "%s/%s" % (path, image['id']) response, content = self._send_http_request(path, 'GET') image = jsonutils.loads(content) self.assertEqual('active', image['status']) response, content = self._send_http_request(path, 'DELETE') self.assertEqual(http.client.NO_CONTENT, response.status) image = self._get_pending_delete_image(image['id']) self.assertEqual('pending_delete', image['status']) self.wait_for_scrub(image['id']) self.stop_servers() def test_scrubber_app(self): """ test that the glance-scrubber script runs successfully when not in daemon mode """ self.cleanup() kwargs = self.__dict__.copy() self.start_servers(delayed_delete=True, daemon=False, metadata_encryption_key='', **kwargs) path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) response, content = self._send_create_image_http_request(path) self.assertEqual(http.client.CREATED, response.status) image = jsonutils.loads(content) self.assertEqual('queued', image['status']) file_path = "%s/%s/file" % (path, image['id']) response, content = self._send_upload_image_http_request(file_path, body='XXX') self.assertEqual(http.client.NO_CONTENT, response.status) path = "%s/%s" % (path, image['id']) response, content = self._send_http_request(path, 'GET') image = jsonutils.loads(content) self.assertEqual('active', image['status']) response, content = self._send_http_request(path, 'DELETE') self.assertEqual(http.client.NO_CONTENT, response.status) image = self._get_pending_delete_image(image['id']) self.assertEqual('pending_delete', image['status']) # wait for the scrub time on the image to pass time.sleep(self.api_server.scrub_time) # scrub images and make sure they get deleted exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(0, exitcode) self.wait_for_scrub(image['id']) self.stop_servers() def test_scrubber_delete_handles_exception(self): """ Test that the scrubber handles the case where an exception occurs when _delete() is called. The scrubber should not write out queue files in this case. """ # Start servers. self.cleanup() kwargs = self.__dict__.copy() self.start_servers(delayed_delete=True, daemon=False, default_store='file', **kwargs) # Check that we are using a file backend. 
self.assertEqual(self.api_server.default_store, 'file') # add an image path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) response, content = self._send_create_image_http_request(path) self.assertEqual(http.client.CREATED, response.status) image = jsonutils.loads(content) self.assertEqual('queued', image['status']) file_path = "%s/%s/file" % (path, image['id']) response, content = self._send_upload_image_http_request(file_path, body='XXX') self.assertEqual(http.client.NO_CONTENT, response.status) path = "%s/%s" % (path, image['id']) response, content = self._send_http_request(path, 'GET') image = jsonutils.loads(content) self.assertEqual('active', image['status']) # delete the image response, content = self._send_http_request(path, 'DELETE') self.assertEqual(http.client.NO_CONTENT, response.status) # ensure the image is marked pending delete. image = self._get_pending_delete_image(image['id']) self.assertEqual('pending_delete', image['status']) # Remove the file from the backend. file_path = os.path.join(self.api_server.image_dir, image['id']) os.remove(file_path) # Wait for the scrub time on the image to pass time.sleep(self.api_server.scrub_time) # run the scrubber app, and ensure it doesn't fall over exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(0, exitcode) self.wait_for_scrub(image['id']) self.stop_servers() def test_scrubber_app_queue_errors_not_daemon(self): """ test that the glance-scrubber exits with an exit code > 0 when it fails to lookup images, indicating a configuration error when not in daemon mode. Related-Bug: #1548289 """ # Don't start the registry server to cause intended failure # Don't start the api server to save time exitcode, out, err = self.scrubber_daemon.start( delayed_delete=True, daemon=False) self.assertEqual(0, exitcode, "Failed to spin up the Scrubber daemon. 
" "Got: %s" % err) # Run the Scrubber exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('Can not get scrub jobs from queue', str(err)) self.stop_server(self.scrubber_daemon) def test_scrubber_restore_image(self): self.cleanup() kwargs = self.__dict__.copy() self.start_servers(delayed_delete=True, daemon=False, metadata_encryption_key='', **kwargs) path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) response, content = self._send_create_image_http_request(path) self.assertEqual(http.client.CREATED, response.status) image = jsonutils.loads(content) self.assertEqual('queued', image['status']) file_path = "%s/%s/file" % (path, image['id']) response, content = self._send_upload_image_http_request(file_path, body='XXX') self.assertEqual(http.client.NO_CONTENT, response.status) path = "%s/%s" % (path, image['id']) response, content = self._send_http_request(path, 'GET') image = jsonutils.loads(content) self.assertEqual('active', image['status']) response, content = self._send_http_request(path, 'DELETE') self.assertEqual(http.client.NO_CONTENT, response.status) image = self._get_pending_delete_image(image['id']) self.assertEqual('pending_delete', image['status']) def _test_content(): exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s --restore %s" % (exe_cmd, self.scrubber_daemon.conf_file_name, image['id'])) return execute(cmd, raise_error=False) exitcode, out, err = self.wait_for_scrubber_shutdown(_test_content) self.assertEqual(0, exitcode) response, content = self._send_http_request(path, 'GET') image = jsonutils.loads(content) self.assertEqual('active', image['status']) self.stop_servers() def test_scrubber_restore_active_image_raise_error(self): self.cleanup() self.start_servers(delayed_delete=True, daemon=False, metadata_encryption_key='') path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) response, content = self._send_create_image_http_request(path) self.assertEqual(http.client.CREATED, response.status) image = jsonutils.loads(content) self.assertEqual('queued', image['status']) file_path = "%s/%s/file" % (path, image['id']) response, content = self._send_upload_image_http_request(file_path, body='XXX') self.assertEqual(http.client.NO_CONTENT, response.status) path = "%s/%s" % (path, image['id']) response, content = self._send_http_request(path, 'GET') image = jsonutils.loads(content) self.assertEqual('active', image['status']) def _test_content(): exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s --restore %s" % (exe_cmd, self.scrubber_daemon.conf_file_name, image['id'])) return execute(cmd, raise_error=False) exitcode, out, err = self.wait_for_scrubber_shutdown(_test_content) self.assertEqual(1, exitcode) self.assertIn('cannot restore the image from active to active ' '(wanted from_state=pending_delete)', str(err)) self.stop_servers() def test_scrubber_restore_image_non_exist(self): def _test_content(): scrubber = functional.ScrubberDaemon(self.test_dir, self.policy_file) scrubber.write_conf(daemon=False) scrubber.needs_database = True scrubber.create_database() exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s --restore fake_image_id" % (exe_cmd, scrubber.conf_file_name)) return execute(cmd, raise_error=False) exitcode, out, err = self.wait_for_scrubber_shutdown(_test_content) 
self.assertEqual(1, exitcode) self.assertIn('No image found with ID fake_image_id', str(err)) def test_scrubber_restore_image_with_daemon_raise_error(self): exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --daemon --restore fake_image_id" % exe_cmd) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('The restore and daemon options should not be set ' 'together', str(err)) def test_scrubber_restore_image_with_daemon_running(self): self.cleanup() self.scrubber_daemon.start(daemon=True) # Give the scrubber some time to start. time.sleep(5) exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --restore fake_image_id" % exe_cmd) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('glance-scrubber is already running', str(err)) self.stop_server(self.scrubber_daemon) def wait_for_scrubber_shutdown(self, func): # NOTE(wangxiyuan, rosmaita): The image-restore functionality contains # a check to make sure the scrubber isn't also running in daemon mode # to prevent a race condition between a delete and a restore. # Sometimes the glance-scrubber process which is set up by the # previous test can't be shut down immediately, so if we get the "daemon # running" message we sleep and try again. not_down_msg = 'glance-scrubber is already running' total_wait = 15 for _ in range(total_wait): exitcode, out, err = func() if exitcode == 1 and not_down_msg in str(err): time.sleep(1) continue return exitcode, out, err else: self.fail('Scrubber did not shut down within {} sec'.format( total_wait)) def wait_for_scrub(self, image_id): """ NOTE(jkoelker) The build servers sometimes take longer than 15 seconds to scrub. Give it up to 5 min, checking every 15 seconds. When/if it flips to deleted, bail immediately. """ wait_for = 300 # seconds check_every = 15 # seconds for _ in range(wait_for // check_every): time.sleep(check_every) image = db_api.get_api().image_get(self.admin_context, image_id) if (image['status'] == 'deleted' and image['deleted']): break else: continue else: self.fail('image was never scrubbed') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/store_utils.py0000664000175000017500000000545300000000000022550 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods to set up test cases for Swift tests.
""" import http.client import http.server import threading from oslo_utils import units FIVE_KB = 5 * units.Ki class RemoteImageHandler(http.server.BaseHTTPRequestHandler): def do_HEAD(self): """ Respond to an image HEAD request fake metadata """ if 'images' in self.path: self.send_response(http.client.OK) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Content-Length', FIVE_KB) self.end_headers() return else: self.send_error( http.client.NOT_FOUND, 'File Not Found: %s' % self.path, ) return def do_GET(self): """ Respond to an image GET request with fake image content. """ if 'images' in self.path: self.send_response(http.client.OK) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Content-Length', FIVE_KB) self.end_headers() image_data = b'*' * FIVE_KB self.wfile.write(image_data) self.wfile.close() return else: self.send_error( http.client.NOT_FOUND, 'File Not Found: %s' % self.path, ) return def log_message(self, format, *args): """ Simple override to prevent writing crap to stderr... """ pass def setup_http(test): server_class = http.server.HTTPServer remote_server = server_class(('127.0.0.1', 0), RemoteImageHandler) remote_ip, remote_port = remote_server.server_address def serve_requests(httpd): httpd.serve_forever() threading.Thread(target=serve_requests, args=(remote_server,)).start() test.http_server = remote_server test.http_ip = remote_ip test.http_port = remote_port test.addCleanup(test.http_server.shutdown) def get_http_uri(test, image_id): uri = ('http://%(http_ip)s:%(http_port)d/images/' % {'http_ip': test.http_ip, 'http_port': test.http_port}) uri += image_id return uri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_api.py0000664000175000017500000001526700000000000022010 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Version-independent api tests""" import http.client as http_client import httplib2 from oslo_serialization import jsonutils from glance.tests import functional from glance.tests.unit import test_versions as tv class TestApiVersions(functional.FunctionalTest): def test_version_configurations(self): """Test that versioning is handled properly through all channels""" self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d' % self.api_port versions = {'versions': tv.get_versions_list(url, enabled_cache=True)} # Verify version choices returned. 
path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(versions, content) def test_v2_api_configuration(self): self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d' % self.api_port versions = {'versions': tv.get_versions_list(url, enabled_cache=True)} # Verify version choices returned. path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(versions, content) class TestApiVersionsMultistore(functional.MultipleBackendFunctionalTest): def test_version_configurations(self): """Test that versioning is handled properly through all channels""" self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d' % self.api_port versions = {'versions': tv.get_versions_list(url, enabled_backends=True, enabled_cache=True)} # Verify version choices returned. path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(versions, content) def test_v2_api_configuration(self): self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d' % self.api_port versions = {'versions': tv.get_versions_list(url, enabled_backends=True, enabled_cache=True)} # Verify version choices returned. path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(versions, content) class TestApiPaths(functional.FunctionalTest): def setUp(self): super(TestApiPaths, self).setUp() self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d' % self.api_port self.versions = {'versions': tv.get_versions_list(url, enabled_cache=True)} images = {'images': []} self.images_json = jsonutils.dumps(images) def test_get_root_path(self): """Assert GET / with `no Accept:` header. Verify version choices returned. Bug lp:803260 no Accept header causes a 500 in glance-api """ path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(self.versions, content) def test_get_root_path_with_unknown_header(self): """Assert GET / with Accept: unknown header Verify version choices returned. Verify message in API log about unknown accept header. 
""" path = 'http://%s:%d/' % ('127.0.0.1', self.api_port) http = httplib2.Http() headers = {'Accept': 'unknown'} response, content_json = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(self.versions, content) def test_get_va1_images_path(self): """Assert GET /va.1/images with no Accept: header Verify version choices returned """ path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(self.versions, content) def test_get_versions_path(self): """Assert GET /versions with no Accept: header Verify version choices returned """ path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.OK, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(self.versions, content) def test_get_versions_choices(self): """Verify version choices returned""" path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content_json = http.request(path, 'GET') self.assertEqual(http_client.MULTIPLE_CHOICES, response.status) content = jsonutils.loads(content_json.decode()) self.assertEqual(self.versions, content) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_cache_middleware.py0000664000175000017500000003767700000000000024510 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests a Glance API server which uses the caching middleware that uses the default SQLite cache driver. We use the filesystem store, but that is really not relevant, as the image cache is transparent to the backend store. 
""" import http.client as http_client import os import shutil import httplib2 from oslo_serialization import jsonutils from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import units from glance.tests import functional from glance.tests.utils import skip_if_disabled from glance.tests.utils import xattr_writes_supported FIVE_KB = 5 * units.Ki class BaseCacheMiddlewareTest(object): def _headers(self, extra=None): headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': uuids.tenant, 'X-Roles': 'reader,member', } if extra: headers.update(extra) return headers @skip_if_disabled def test_cache_middleware_transparent_v2(self): """Ensure the v2 API image transfer calls trigger caching""" self.cleanup() self.start_servers(**self.__dict__.copy()) # Add an image and verify success path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) image_entity = { 'name': 'Image1', 'visibility': 'public', 'container_format': 'bare', 'disk_format': 'raw', } response, content = http.request(path, 'POST', headers=headers, body=jsonutils.dumps(image_entity)) self.assertEqual(http_client.CREATED, response.status) data = jsonutils.loads(content) image_id = data['id'] path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port, image_id) headers = self._headers({'content-type': 'application/octet-stream'}) image_data = "*" * FIVE_KB response, content = http.request(path, 'PUT', headers=headers, body=image_data) self.assertEqual(http_client.NO_CONTENT, response.status) # Verify image not in cache image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertFalse(os.path.exists(image_cached_path)) # Grab the image http = httplib2.Http() response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.OK, response.status) # Verify image now in cache image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertTrue(os.path.exists(image_cached_path)) # Now, we delete the image from the server and verify that # the image cache no longer contains the deleted image path = "http://%s:%d/v2/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE', headers=headers) self.assertEqual(http_client.NO_CONTENT, response.status) self.assertFalse(os.path.exists(image_cached_path)) self.stop_servers() @skip_if_disabled def test_partially_downloaded_images_are_not_cached_v2_api(self): """ Verify that we do not cache images that were downloaded partially using v2 images API. 
""" self.cleanup() self.start_servers(**self.__dict__.copy()) # Add an image and verify success path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) image_entity = { 'name': 'Image1', 'visibility': 'public', 'container_format': 'bare', 'disk_format': 'raw', } response, content = http.request(path, 'POST', headers=headers, body=jsonutils.dumps(image_entity)) self.assertEqual(http_client.CREATED, response.status) data = jsonutils.loads(content) image_id = data['id'] path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port, image_id) headers = self._headers({'content-type': 'application/octet-stream'}) image_data = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' response, content = http.request(path, 'PUT', headers=headers, body=image_data) self.assertEqual(http_client.NO_CONTENT, response.status) # Verify that this image is not in cache image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertFalse(os.path.exists(image_cached_path)) # partially download this image and verify status 206 http = httplib2.Http() # range download request range_ = 'bytes=3-5' headers = self._headers({'Range': range_}) response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.PARTIAL_CONTENT, response.status) self.assertEqual(b'DEF', content) # content-range download request # NOTE(dharinic): Glance incorrectly supports Content-Range for partial # image downloads in requests. This test is included to ensure that # we prevent regression. content_range = 'bytes 3-5/*' headers = self._headers({'Content-Range': content_range}) response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.PARTIAL_CONTENT, response.status) self.assertEqual(b'DEF', content) # verify that we do not cache the partial image image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertFalse(os.path.exists(image_cached_path)) self.stop_servers() @skip_if_disabled def test_partial_download_of_cached_images_v2_api(self): """ Verify that partial download requests for a fully cached image succeeds; we do not serve it from cache. 
""" self.cleanup() self.start_servers(**self.__dict__.copy()) # Add an image and verify success path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) image_entity = { 'name': 'Image1', 'visibility': 'public', 'container_format': 'bare', 'disk_format': 'raw', } response, content = http.request(path, 'POST', headers=headers, body=jsonutils.dumps(image_entity)) self.assertEqual(http_client.CREATED, response.status) data = jsonutils.loads(content) image_id = data['id'] path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port, image_id) headers = self._headers({'content-type': 'application/octet-stream'}) image_data = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ' response, content = http.request(path, 'PUT', headers=headers, body=image_data) self.assertEqual(http_client.NO_CONTENT, response.status) # Verify that this image is not in cache image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertFalse(os.path.exists(image_cached_path)) # Download the entire image http = httplib2.Http() response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.OK, response.status) self.assertEqual(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ', content) # Verify that the image is now in cache image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertTrue(os.path.exists(image_cached_path)) # Modify the data in cache so we can verify the partially downloaded # content was not from cache indeed. with open(image_cached_path, 'w') as cache_file: cache_file.write('0123456789') # Partially attempt a download of this image and verify that is not # from cache # range download request range_ = 'bytes=3-5' headers = self._headers({'Range': range_, 'content-type': 'application/json'}) response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.PARTIAL_CONTENT, response.status) self.assertEqual(b'DEF', content) self.assertNotEqual(b'345', content) self.assertNotEqual(image_data, content) # content-range download request # NOTE(dharinic): Glance incorrectly supports Content-Range for partial # image downloads in requests. This test is included to ensure that # we prevent regression. content_range = 'bytes 3-5/*' headers = self._headers({'Content-Range': content_range, 'content-type': 'application/json'}) response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.PARTIAL_CONTENT, response.status) self.assertEqual(b'DEF', content) self.assertNotEqual(b'345', content) self.assertNotEqual(image_data, content) self.stop_servers() @skip_if_disabled def test_cache_middleware_trans_v2_without_download_image_policy(self): """ Ensure the image v2 API image transfer applied 'download_image' policy enforcement. 
""" self.cleanup() self.start_servers(**self.__dict__.copy()) # Add an image and verify success path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) image_entity = { 'name': 'Image1', 'visibility': 'public', 'container_format': 'bare', 'disk_format': 'raw', } response, content = http.request(path, 'POST', headers=headers, body=jsonutils.dumps(image_entity)) self.assertEqual(http_client.CREATED, response.status) data = jsonutils.loads(content) image_id = data['id'] path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port, image_id) headers = self._headers({'content-type': 'application/octet-stream'}) image_data = "*" * FIVE_KB response, content = http.request(path, 'PUT', headers=headers, body=image_data) self.assertEqual(http_client.NO_CONTENT, response.status) # Verify image not in cache image_cached_path = os.path.join(self.api_server.image_cache_dir, image_id) self.assertFalse(os.path.exists(image_cached_path)) rules = {"context_is_admin": "role:admin", "default": "", "download_image": "!"} self.set_policy_rules(rules) # Grab the image http = httplib2.Http() response, content = http.request(path, 'GET', headers=headers) self.assertEqual(http_client.FORBIDDEN, response.status) # Now, we delete the image from the server and verify that # the image cache no longer contains the deleted image path = "http://%s:%d/v2/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE', headers=headers) self.assertEqual(http_client.NO_CONTENT, response.status) self.assertFalse(os.path.exists(image_cached_path)) self.stop_servers() class TestImageCacheXattr(functional.FunctionalTest, BaseCacheMiddlewareTest): """Functional tests that exercise the image cache using the xattr driver""" def setUp(self): """ Test to see if the pre-requisites for the image cache are working (python-xattr installed and xattr support on the filesystem) """ if getattr(self, 'disabled', False): raise self.skipException('Test disabled.') if not getattr(self, 'inited', False): try: import xattr # noqa except ImportError: self.inited = True self.disabled = True self.disabled_message = ("python-xattr not installed.") raise self.skipException(self.disabled_message) self.inited = True self.disabled = False self.image_cache_driver = "xattr" super(TestImageCacheXattr, self).setUp() self.api_server.deployment_flavor = "caching" if not xattr_writes_supported(self.test_dir): self.inited = True self.disabled = True self.disabled_message = ("filesystem does not support xattr") raise self.skipException(self.disabled_message) def tearDown(self): super(TestImageCacheXattr, self).tearDown() if os.path.exists(self.api_server.image_cache_dir): shutil.rmtree(self.api_server.image_cache_dir) class TestImageCacheSqlite(functional.FunctionalTest, BaseCacheMiddlewareTest): """ Functional tests that exercise the image cache using the SQLite driver """ def setUp(self): """ Test to see if the pre-requisites for the image cache are working (python-xattr installed and xattr support on the filesystem) """ if getattr(self, 'disabled', False): return if not getattr(self, 'inited', False): try: import sqlite3 # noqa except ImportError: self.inited = True self.disabled = True self.disabled_message = ("python-sqlite3 not installed.") return self.inited = True self.disabled = False super(TestImageCacheSqlite, self).setUp() self.api_server.deployment_flavor = "caching" def 
tearDown(self): super(TestImageCacheSqlite, self).tearDown() if os.path.exists(self.api_server.image_cache_dir): shutil.rmtree(self.api_server.image_cache_dir) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_client_exceptions.py0000664000175000017500000001065200000000000024747 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test asserting strongly typed exceptions from glance client""" import http.client as http_client import eventlet.patcher import httplib2 import webob.dec import webob.exc from glance.common import client from glance.common import exception from glance.common import wsgi from glance.tests import functional from glance.tests import utils eventlet.patcher.monkey_patch(socket=True) class ExceptionTestApp(object): """ Test WSGI application which can respond with multiple kinds of HTTP status codes """ @webob.dec.wsgify def __call__(self, request): path = request.path_qs if path == "/rate-limit": request.response = webob.exc.HTTPRequestEntityTooLarge() elif path == "/rate-limit-retry": request.response.retry_after = 10 request.response.status = http_client.REQUEST_ENTITY_TOO_LARGE elif path == "/service-unavailable": request.response = webob.exc.HTTPServiceUnavailable() elif path == "/service-unavailable-retry": request.response.retry_after = 10 request.response.status = http_client.SERVICE_UNAVAILABLE elif path == "/expectation-failed": request.response = webob.exc.HTTPExpectationFailed() elif path == "/server-error": request.response = webob.exc.HTTPServerError() elif path == "/server-traceback": raise exception.ServerError() class TestClientExceptions(functional.FunctionalTest): def setUp(self): super(TestClientExceptions, self).setUp() self.port = utils.get_unused_port() server = wsgi.Server() self.config(bind_host='127.0.0.1') self.config(workers=0) server.start(ExceptionTestApp(), self.port) self.client = client.BaseClient("127.0.0.1", self.port) def _do_test_exception(self, path, exc_type): try: self.client.do_request("GET", path) self.fail('expected %s' % exc_type) except exc_type as e: if 'retry' in path: self.assertEqual(10, e.retry_after) def test_rate_limited(self): """ Test rate limited response """ self._do_test_exception('/rate-limit', exception.LimitExceeded) def test_rate_limited_retry(self): """ Test rate limited response with retry """ self._do_test_exception('/rate-limit-retry', exception.LimitExceeded) def test_service_unavailable(self): """ Test service unavailable response """ self._do_test_exception('/service-unavailable', exception.ServiceUnavailable) def test_service_unavailable_retry(self): """ Test service unavailable response with retry """ self._do_test_exception('/service-unavailable-retry', exception.ServiceUnavailable) def test_expectation_failed(self): """ Test expectation failed response """ 
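# webob's HTTPExpectationFailed (417) has no dedicated mapping in the
# client, so BaseClient is expected to surface it as
# exception.UnexpectedStatus.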
self._do_test_exception('/expectation-failed', exception.UnexpectedStatus) def test_server_error(self): """ Test server error response """ self._do_test_exception('/server-error', exception.ServerError) def test_server_traceback(self): """ Verify that the wsgi server does not return tracebacks to the client on 500 errors (bug 1192132) """ http = httplib2.Http() path = ('http://%s:%d/server-traceback' % ('127.0.0.1', self.port)) response, content = http.request(path, 'GET') self.assertNotIn(b'ServerError', content) self.assertEqual(http_client.INTERNAL_SERVER_ERROR, response.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_client_redirects.py0000664000175000017500000001165400000000000024555 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test cases testing glance client redirect-following.""" import http.client as http import eventlet.patcher import webob.dec import webob.exc from glance.common import client from glance.common import exception from glance.common import wsgi from glance.tests import functional from glance.tests import utils eventlet.patcher.monkey_patch(socket=True) def RedirectTestApp(name): class App(object): """ Test WSGI application which can respond with multiple kinds of HTTP redirects and is used to verify Glance client redirects. """ def __init__(self): """ Initialize app with a name and port. """ self.name = name @webob.dec.wsgify def __call__(self, request): """ Handles all requests to the application. 
""" base = "http://%s" % request.host path = request.path_qs if path == "/": return "root" elif path == "/302": url = "%s/success" % base raise webob.exc.HTTPFound(location=url) elif path == "/302?with_qs=yes": url = "%s/success?with_qs=yes" % base raise webob.exc.HTTPFound(location=url) elif path == "/infinite_302": raise webob.exc.HTTPFound(location=request.url) elif path.startswith("/redirect-to"): url = "http://127.0.0.1:%s/success" % path.split("-")[-1] raise webob.exc.HTTPFound(location=url) elif path == "/success": return "success_from_host_%s" % self.name elif path == "/success?with_qs=yes": return "success_with_qs" return "fail" return App class TestClientRedirects(functional.FunctionalTest): def setUp(self): super(TestClientRedirects, self).setUp() self.port_one = utils.get_unused_port() self.port_two = utils.get_unused_port() server_one = wsgi.Server() server_two = wsgi.Server() self.config(bind_host='127.0.0.1') self.config(workers=0) server_one.start(RedirectTestApp("one")(), self.port_one) server_two.start(RedirectTestApp("two")(), self.port_two) self.client = client.BaseClient("127.0.0.1", self.port_one) def test_get_without_redirect(self): """ Test GET with no redirect """ response = self.client.do_request("GET", "/") self.assertEqual(http.OK, response.status) self.assertEqual(b"root", response.read()) def test_get_with_one_redirect(self): """ Test GET with one 302 FOUND redirect """ response = self.client.do_request("GET", "/302") self.assertEqual(http.OK, response.status) self.assertEqual(b"success_from_host_one", response.read()) def test_get_with_one_redirect_query_string(self): """ Test GET with one 302 FOUND redirect w/ a query string """ response = self.client.do_request("GET", "/302", params={'with_qs': 'yes'}) self.assertEqual(http.OK, response.status) self.assertEqual(b"success_with_qs", response.read()) def test_get_with_max_redirects(self): """ Test we don't redirect forever. """ self.assertRaises(exception.MaxRedirectsExceeded, self.client.do_request, "GET", "/infinite_302") def test_post_redirect(self): """ Test POST with 302 redirect """ response = self.client.do_request("POST", "/302") self.assertEqual(http.OK, response.status) self.assertEqual(b"success_from_host_one", response.read()) def test_redirect_to_new_host(self): """ Test redirect to one host and then another. """ url = "/redirect-to-%d" % self.port_two response = self.client.do_request("POST", url) self.assertEqual(http.OK, response.status) self.assertEqual(b"success_from_host_two", response.read()) response = self.client.do_request("POST", "/success") self.assertEqual(http.OK, response.status) self.assertEqual(b"success_from_host_one", response.read()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_cors_middleware.py0000664000175000017500000000715500000000000024377 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests cors middleware.""" import http.client import httplib2 from oslo_utils.fixture import uuidsentinel as uuids from glance.tests import functional class TestCORSMiddleware(functional.FunctionalTest): '''Provide a basic smoke test to ensure CORS middleware is active. The tests below provide minimal confirmation that the CORS middleware is active, and may be configured. For comprehensive tests, please consult the test suite in oslo_middleware. ''' def setUp(self): super(TestCORSMiddleware, self).setUp() # Cleanup is handled in teardown of the parent class. self.api_server.deployment_flavor = "caching" self.start_servers(**self.__dict__.copy()) self.http = httplib2.Http() self.api_path = "http://%s:%d/v2/images" % ("127.0.0.1", self.api_port) def _headers(self, extra=None): headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': uuids.tenant, 'X-Roles': 'reader,member', } if extra: headers.update(extra) return headers def test_valid_cors_options_request(self): (r_headers, content) = self.http.request( self.api_path, 'OPTIONS', headers=self._headers({ 'Origin': 'http://valid.example.com', 'Access-Control-Request-Method': 'GET' })) self.assertEqual(http.client.OK, r_headers.status) self.assertIn('access-control-allow-origin', r_headers) self.assertEqual('http://valid.example.com', r_headers['access-control-allow-origin']) def test_invalid_cors_options_request(self): (r_headers, content) = self.http.request( self.api_path, 'OPTIONS', headers=self._headers({ 'Origin': 'http://invalid.example.com', 'Access-Control-Request-Method': 'GET' })) self.assertEqual(http.client.OK, r_headers.status) self.assertNotIn('access-control-allow-origin', r_headers) def test_valid_cors_get_request(self): (r_headers, content) = self.http.request( self.api_path, 'GET', headers=self._headers({ 'Origin': 'http://valid.example.com' })) self.assertEqual(http.client.OK, r_headers.status) self.assertIn('access-control-allow-origin', r_headers) self.assertEqual('http://valid.example.com', r_headers['access-control-allow-origin']) def test_invalid_cors_get_request(self): (r_headers, content) = self.http.request( self.api_path, 'GET', headers=self._headers({ 'Origin': 'http://invalid.example.com' })) self.assertEqual(http.client.OK, r_headers.status) self.assertNotIn('access-control-allow-origin', r_headers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_glance_manage.py0000664000175000017500000001612700000000000023774 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Functional test cases for glance-manage""" import os import sys from oslo_config import cfg from oslo_db import options as db_options from glance.common import utils from glance.db import migration as db_migration from glance.db.sqlalchemy import alembic_migrations from glance.db.sqlalchemy.alembic_migrations import data_migrations from glance.db.sqlalchemy import api as db_api from glance.tests import functional from glance.tests.utils import depends_on_exe from glance.tests.utils import execute from glance.tests.utils import skip_if_disabled CONF = cfg.CONF class TestGlanceManage(functional.FunctionalTest): """Functional tests for glance-manage""" def setUp(self): super(TestGlanceManage, self).setUp() conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) self.conf_filepath = os.path.join(conf_dir, 'glance-manage.conf') self.db_filepath = os.path.join(self.test_dir, 'tests.sqlite') self.connection = ('connection = sqlite:///%s' % self.db_filepath) db_options.set_defaults(CONF, connection='sqlite:///%s' % self.db_filepath) def _db_command(self, db_method): with open(self.conf_filepath, 'w') as conf_file: conf_file.write('[database]\n') conf_file.write(self.connection) conf_file.flush() cmd = ('%s -m glance.cmd.manage --config-file %s db %s' % (sys.executable, self.conf_filepath, db_method)) return execute(cmd, raise_error=True) def _check_db(self, expected_exitcode): with open(self.conf_filepath, 'w') as conf_file: conf_file.write('[database]\n') conf_file.write(self.connection) conf_file.flush() cmd = ('%s -m glance.cmd.manage --config-file %s db check' % (sys.executable, self.conf_filepath)) exitcode, out, err = execute(cmd, raise_error=True, expected_exitcode=expected_exitcode) return exitcode, out def _assert_table_exists(self, db_table): cmd = ("sqlite3 {0} \"SELECT name FROM sqlite_master WHERE " "type='table' AND name='{1}'\"").format(self.db_filepath, db_table) exitcode, out, err = execute(cmd, raise_error=True) msg = "Expected table {0} was not found in the schema".format(db_table) self.assertEqual(out.rstrip().decode("utf-8"), db_table, msg) @depends_on_exe('sqlite3') @skip_if_disabled def test_db_creation(self): """Test schema creation by db_sync on a fresh DB""" self._db_command(db_method='sync') for table in ['images', 'image_tags', 'image_locations', 'image_members', 'image_properties']: self._assert_table_exists(table) @depends_on_exe('sqlite3') @skip_if_disabled def test_sync(self): """Test DB sync which internally calls EMC""" self._db_command(db_method='sync') contract_head = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) cmd = ("sqlite3 {0} \"SELECT version_num FROM alembic_version\"" ).format(self.db_filepath) exitcode, out, err = execute(cmd, raise_error=True) self.assertEqual(contract_head, out.rstrip().decode("utf-8")) @depends_on_exe('sqlite3') @skip_if_disabled def test_check(self): exitcode, out = self._check_db(3) self.assertEqual(3, exitcode) self._db_command(db_method='expand') if data_migrations.has_pending_migrations(db_api.get_engine()): exitcode, out = self._check_db(4) self.assertEqual(4, exitcode) self._db_command(db_method='migrate') exitcode, out = self._check_db(5) self.assertEqual(5, exitcode) self._db_command(db_method='contract') exitcode, out = self._check_db(0) self.assertEqual(0, exitcode) @depends_on_exe('sqlite3') @skip_if_disabled def test_expand(self): """Test DB expand""" self._db_command(db_method='expand') expand_head = alembic_migrations.get_alembic_branch_head( db_migration.EXPAND_BRANCH) 
cmd = ("sqlite3 {0} \"SELECT version_num FROM alembic_version\"" ).format(self.db_filepath) exitcode, out, err = execute(cmd, raise_error=True) self.assertEqual(expand_head, out.rstrip().decode("utf-8")) exitcode, out, err = self._db_command(db_method='expand') self.assertIn('Database expansion is up to date. ' 'No expansion needed.', str(out)) @depends_on_exe('sqlite3') @skip_if_disabled def test_migrate(self): """Test DB migrate""" self._db_command(db_method='expand') if data_migrations.has_pending_migrations(db_api.get_engine()): self._db_command(db_method='migrate') expand_head = alembic_migrations.get_alembic_branch_head( db_migration.EXPAND_BRANCH) cmd = ("sqlite3 {0} \"SELECT version_num FROM alembic_version\"" ).format(self.db_filepath) exitcode, out, err = execute(cmd, raise_error=True) self.assertEqual(expand_head, out.rstrip().decode("utf-8")) self.assertEqual(False, data_migrations.has_pending_migrations( db_api.get_engine())) if data_migrations.has_pending_migrations(db_api.get_engine()): exitcode, out, err = self._db_command(db_method='migrate') self.assertIn('Database migration is up to date. No migration ' 'needed.', str(out)) @depends_on_exe('sqlite3') @skip_if_disabled def test_contract(self): """Test DB contract""" self._db_command(db_method='expand') if data_migrations.has_pending_migrations(db_api.get_engine()): self._db_command(db_method='migrate') self._db_command(db_method='contract') contract_head = alembic_migrations.get_alembic_branch_head( db_migration.CONTRACT_BRANCH) cmd = ("sqlite3 {0} \"SELECT version_num FROM alembic_version\"" ).format(self.db_filepath) exitcode, out, err = execute(cmd, raise_error=True) self.assertEqual(contract_head, out.rstrip().decode("utf-8")) exitcode, out, err = self._db_command(db_method='contract') self.assertIn('Database is up to date. No migrations needed.', str(out)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_gzip_middleware.py0000664000175000017500000000323100000000000024371 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests gzip middleware.""" import httplib2 from glance.tests import functional from glance.tests import utils class GzipMiddlewareTest(functional.FunctionalTest): @utils.skip_if_disabled def test_gzip_requests(self): self.cleanup() self.start_servers(**self.__dict__.copy()) def request(path, headers=None): # We don't care what version we're using here so, # sticking with latest url = 'http://127.0.0.1:%s/v2/%s' % (self.api_port, path) http = httplib2.Http() return http.request(url, 'GET', headers=headers) # Accept-Encoding: Identity headers = {'Accept-Encoding': 'identity'} response, content = request('images', headers=headers) self.assertIsNone(response.get("-content-encoding")) # Accept-Encoding: gzip headers = {'Accept-Encoding': 'gzip'} response, content = request('images', headers=headers) self.assertEqual('gzip', response.get("-content-encoding")) self.stop_servers() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_healthcheck_middleware.py0000664000175000017500000000340600000000000025667 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests healthcheck middleware.""" import http.client import tempfile import httplib2 from glance.tests import functional from glance.tests import utils class HealthcheckMiddlewareTest(functional.FunctionalTest): def request(self): url = 'http://127.0.0.1:%s/healthcheck' % self.api_port http = httplib2.Http() return http.request(url, 'GET') @utils.skip_if_disabled def test_healthcheck_enabled(self): self.cleanup() self.start_servers(**self.__dict__.copy()) response, content = self.request() self.assertEqual(b'OK', content) self.assertEqual(http.client.OK, response.status) self.stop_servers() def test_healthcheck_disabled(self): with tempfile.NamedTemporaryFile() as test_disable_file: self.cleanup() self.api_server.disable_path = test_disable_file.name self.start_servers(**self.__dict__.copy()) response, content = self.request() self.assertEqual(b'DISABLED BY FILE', content) self.assertEqual(http.client.SERVICE_UNAVAILABLE, response.status) self.stop_servers() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_logging.py0000664000175000017500000000534600000000000022662 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Functional test case that tests logging output""" import http.client as http import os import stat import httplib2 from glance.tests import functional class TestLogging(functional.FunctionalTest): """Functional tests for Glance's logging output""" def test_debug(self): """ Test that logging output is correct when debug is on. """ self.cleanup() self.start_servers() # The default functional test case has debug on. Let's verify # that debug statements appear in the API logs. self.assertTrue(os.path.exists(self.api_server.log_file)) with open(self.api_server.log_file, 'r') as f: api_log_out = f.read() self.assertIn('DEBUG glance', api_log_out) self.stop_servers() def test_no_debug(self): """ Test that logging output is correct when debug is off. """ self.cleanup() self.start_servers(debug=False) self.assertTrue(os.path.exists(self.api_server.log_file)) with open(self.api_server.log_file, 'r') as f: api_log_out = f.read() self.assertNotIn('DEBUG glance', api_log_out) self.stop_servers() def assertNotEmptyFile(self, path): self.assertTrue(os.path.exists(path)) self.assertNotEqual(os.stat(path)[stat.ST_SIZE], 0) def test_logrotate(self): """ Test that we notice when our log file has been rotated """ # Moving in-use files is not supported on Windows. # The log handler itself may be configured to rotate files. if os.name == 'nt': raise self.skipException("Unsupported platform.") self.cleanup() self.start_servers() self.assertNotEmptyFile(self.api_server.log_file) os.rename(self.api_server.log_file, self.api_server.log_file + ".1") path = "http://%s:%d/" % ("127.0.0.1", self.api_port) response, content = httplib2.Http().request(path, 'GET') self.assertEqual(http.MULTIPLE_CHOICES, response.status) self.assertNotEmptyFile(self.api_server.log_file) self.stop_servers() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_reload.py0000664000175000017500000001331100000000000022471 0ustar00zuulzuul00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
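# Illustrative sketch (not part of the suite): the reload behaviour under
# test is plain SIGHUP handling by the parent process. The Python
# equivalent of the "kill -HUP <pid>" these tests shell out to would be,
# assuming parent_pid is already known:
#
#     import os
#     import signal
#     os.kill(parent_pid, signal.SIGHUP)  # parent re-reads config, recycles workers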
import http.client as http import os import re import time import psutil import requests from glance.tests import functional from glance.tests.utils import execute TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', 'var')) def set_config_value(filepath, key, value): """Set 'key = value' in config file""" replacement_line = '%s = %s\n' % (key, value) match = re.compile(r'^%s\s+=' % key).match with open(filepath, 'r+') as f: lines = f.readlines() f.seek(0, 0) f.truncate() for line in lines: f.write(line if not match(line) else replacement_line) class TestReload(functional.FunctionalTest): """Test configuration reload""" def setUp(self): super(TestReload, self).setUp() self.cleanup() self.workers = 1 self.include_scrubber = False def tearDown(self): if not self.disabled: self.stop_servers() super(TestReload, self).tearDown() def ticker(self, message, seconds=60, tick=0.01): """ Allows repeatedly testing for an expected result for a finite amount of time. :param message: Message to display on timeout :param seconds: Time in seconds after which we timeout :param tick: Time to sleep before rechecking for expected result :returns: 'True' or fails the test with 'message' on timeout """ # We default to allowing 60 seconds timeout but # typically only a few hundredths of a second # are needed. num_ticks = seconds * (1.0 / tick) count = 0 while count < num_ticks: count += 1 time.sleep(tick) yield self.fail(message) def _get_children(self, server): pid = None pid = self._get_parent(server) process = psutil.Process(pid) children = process.children() pids = set() for child in children: pids.add(child.pid) return pids def _get_parent(self, server): if server == 'api': return self.api_server.process_pid def _conffile(self, service): conf_dir = os.path.join(self.test_dir, 'etc') conf_filepath = os.path.join(conf_dir, '%s.conf' % service) return conf_filepath def test_reload(self): """Test SIGHUP picks up new config values""" def check_pids(pre, post=None, workers=2): if post is None: if len(pre) == workers: return True else: return False if len(post) == workers: # Check new children have different pids if post.intersection(pre) == set(): return True return False self.api_server.fork_socket = False self.start_servers(fork_socket=False, **vars(self)) pre_pids = {} post_pids = {} path = self._url('/') response = requests.get(path) self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) del response # close socket so that process audit is reliable pre_pids['api'] = self._get_children('api') # Test changing the workers value creates all new children # This recycles the existing socket msg = 'Start timeout' for _ in self.ticker(msg): pre_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], workers=1): break set_config_value(self._conffile('api'), 'workers', '2') cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'Worker change timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break # Test changing the http bind_host # This requires a new socket pre_pids['api'] = self._get_children('api') set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1') cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'http bind_host timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break path = self._url('/') response = requests.get(path) 
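# A GET on the versionless root returns 300 Multiple Choices (the API
# version document), so it doubles as a cheap liveness check here.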
self.assertEqual(http.MULTIPLE_CHOICES, response.status_code) del response # Test logging configuration change # This recycles the existing socket conf_dir = os.path.join(self.test_dir, 'etc') log_file = conf_dir + 'new.log' self.assertFalse(os.path.exists(log_file)) set_config_value(self._conffile('api'), 'log_file', log_file) cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'No new log file created' for _ in self.ticker(msg): if os.path.exists(log_file): break ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_sqlite.py0000664000175000017500000000250400000000000022526 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test cases for sqlite-specific logic""" from glance.tests import functional from glance.tests.utils import depends_on_exe from glance.tests.utils import execute from glance.tests.utils import skip_if_disabled class TestSqlite(functional.FunctionalTest): """Functional tests for sqlite-specific logic""" @depends_on_exe('sqlite3') @skip_if_disabled def test_big_int_mapping(self): """Ensure BigInteger not mapped to BIGINT""" self.cleanup() self.start_servers(**self.__dict__.copy()) cmd = 'sqlite3 tests.sqlite ".schema"' exitcode, out, err = execute(cmd, raise_error=True) self.assertNotIn('BIGINT', out) self.stop_servers() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/test_wsgi.py0000664000175000017500000001563200000000000022204 0ustar00zuulzuul00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for `glance.wsgi`.""" import http.client as http import os import socket import time from oslo_serialization import jsonutils from oslo_utils.fixture import uuidsentinel as uuids import requests from glance.common import wsgi from glance.tests import functional class TestWSGIServer(functional.FunctionalTest): """WSGI server tests.""" def test_client_socket_timeout(self): self.config(workers=0) self.config(client_socket_timeout=1) """Verify connections are timed out as per 'client_socket_timeout'""" greetings = b'Hello, World!!!' 
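# Trivial WSGI app served for this test; the behaviour being verified
# is the server-side socket timeout, not the app itself.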
def hello_world(env, start_response): start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server() server.start(hello_world, 0) port = server.sock.getsockname()[1] def get_request(delay=0.0): # Socket timeouts are handled rather inconsistently on Windows. # recv may either return nothing OR raise a ConnectionAbortedError. exp_exc = OSError if os.name == 'nt' else () try: sock = socket.socket() sock.connect(('127.0.0.1', port)) time.sleep(delay) sock.send(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') return sock.recv(1024) except exp_exc: return None # Should succeed - no timeout self.assertIn(greetings, get_request()) # Should fail - connection timed out so we get nothing from the server self.assertFalse(get_request(delay=1.1)) class StagingCleanupBase: def _configure_api_server(self): self.my_api_server.deployment_flavor = 'noauth' def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': uuids.tenant1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def test_clean_on_start(self): staging = os.path.join(self.test_dir, 'staging') # Start the server self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # Stage data for the image path = self._url('/v2/images/%s/stage' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) image_data = b'ZZZZZ' response = requests.put(path, headers=headers, data=image_data) self.assertEqual(http.NO_CONTENT, response.status_code) # Stop the server self.my_api_server.stop() # Create more files in staging, one unrecognized one, and one # uuid that matches nothing in the database, and some residue # like we would see from failed conversions and decompressions # for the image we created above. open(os.path.join(staging, 'foo'), 'w') open(os.path.join(staging, uuids.stale), 'w') open(os.path.join(staging, uuids.converting), 'w') converting_fn = os.path.join(staging, '%s.qcow2' % uuids.converting) decompressing_fn = os.path.join(staging, '%s.uc' % uuids.decompressing) open(converting_fn, 'w') open(decompressing_fn, 'w') # Restart the server. We set "needs_database" to False here to avoid # recreating the database during startup, thus causing the server to # think there are no valid images and deleting everything. self.my_api_server.needs_database = False self.start_with_retry(self.my_api_server, 'api_port', 3, **self.__dict__.copy()) # Poll to give it time to come up and do the work. Use the presence # of the extra files to determine if the cleanup has run yet. for i in range(0, 10): try: requests.get(self._url('/v2/images')) except Exception: # Not even answering queries yet pass else: files = os.listdir(staging) if len(files) == 2: break time.sleep(1) # We should still find the not-an-image file... self.assertTrue(os.path.exists(os.path.join(staging, 'foo'))) # ...and make sure the actually-staged image file is still present.... 
self.assertTrue(os.path.exists(os.path.join(staging, image_id))) # ... but the stale image should be gone, self.assertFalse(os.path.exists(os.path.join(staging, uuids.stale))) # ... along with the residue of the conversion ... self.assertFalse(os.path.exists(converting_fn)) # ... and the residue of the decompression. self.assertFalse(os.path.exists(decompressing_fn)) self.stop_servers() class TestStagingCleanupMultistore(functional.MultipleBackendFunctionalTest, StagingCleanupBase): """Test for staging store cleanup on API server startup. This tests the multistore case. """ def setUp(self): super(TestStagingCleanupMultistore, self).setUp() self.my_api_server = self.api_server_multiple_backend self._configure_api_server() class TestStagingCleanupSingleStore(functional.FunctionalTest, StagingCleanupBase): """Test for staging store cleanup on API server startup. This tests the single store case. """ def setUp(self): super(TestStagingCleanupSingleStore, self).setUp() self.my_api_server = self.api_server self._configure_api_server() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8943074 glance-29.0.0/glance/tests/functional/v2/0000775000175000017500000000000000000000000020142 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/__init__.py0000664000175000017500000000000000000000000022241 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/metadef_base.py0000664000175000017500000000466100000000000023122 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
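# Illustrative sketch (not part of the suite; URL and token are
# hypothetical): the helpers below are thin wrappers over the metadef REST
# API, where creating a namespace is a single POST:
#
#     import requests
#     namespace = {'namespace': 'MyNamespace',
#                  'display_name': 'My Namespace',
#                  'visibility': 'private',
#                  'protected': False}
#     resp = requests.post('http://127.0.0.1:9292/v2/metadefs/namespaces',
#                          headers={'X-Auth-Token': 'TOKEN'},
#                          json=namespace)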
from oslo_utils.fixture import uuidsentinel as uuids import requests from glance.tests import functional class MetadefFunctionalTestBase(functional.FunctionalTest): """A basic set of assertions and utilities for testing the metadef API.""" def setUp(self): super().setUp() self.tenant1 = uuids.owner1 self.tenant2 = uuids.owner2 def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': self.tenant1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def assertNamespacesEqual(self, actual, expected): """Assert two namespace dictionaries are the same.""" actual.pop('created_at', None) actual.pop('updated_at', None) expected_namespace = { "namespace": expected['namespace'], "display_name": expected['display_name'], "description": expected['description'], "visibility": expected['visibility'], "protected": False, "owner": expected['owner'], "self": "/v2/metadefs/namespaces/%s" % expected['namespace'], "schema": "/v2/schemas/metadefs/namespace" } self.assertEqual(actual, expected_namespace) def create_namespace(self, path, headers, namespace): """Create a metadef namespace. :param path: string representing the namespace API path :param headers: dictionary with the headers to use for the request :param namespace: dictionary representing the namespace to create :returns: a dictionary of the namespace in the response """ return requests.post(path, headers=headers, json=namespace).json() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_cache_api.py0000664000175000017500000003511200000000000023451 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
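# Map of the v2 cache API exercised by the helper methods in TestImageCache
# below:
#
#     GET    /v2/cache             -> {'cached_images': [], 'queued_images': []}
#     PUT    /v2/cache/<image_id>  -> queue an image for caching (202)
#     DELETE /v2/cache/<image_id>  -> evict a single image (204)
#     DELETE /v2/cache             -> clear cache and/or queue; the scope is
#                                     chosen via 'x-image-cache-clear-target'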
import time from unittest import mock import oslo_policy.policy from glance.api import policy from glance.tests import functional class TestImageCache(functional.SynchronousAPIBase): # ToDo(abhishekk): Once system scope is enabled and RBAC is fully # supported, enable these tests for RBAC as well def setUp(self): super(TestImageCache, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self, enable_cache=True): # NOTE(abhishekk): Once sqlite driver is removed, fix these tests # to work with centralized_db driver self.config(image_cache_driver='sqlite') with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestImageCache, self).start_server(enable_cache=enable_cache) def load_data(self): output = {} # Create 1 queued image as well for testing path = "/v2/images" data = { 'name': 'queued-image', 'container_format': 'bare', 'disk_format': 'raw' } response = self.api_post(path, json=data) self.assertEqual(201, response.status_code) image_id = response.json['id'] output['queued'] = image_id for visibility in ['public', 'private']: data = { 'name': '%s-image' % visibility, 'visibility': visibility, 'container_format': 'bare', 'disk_format': 'raw' } response = self.api_post(path, json=data) self.assertEqual(201, response.status_code) image_id = response.json['id'] # Upload some data to image response = self.api_put( '/v2/images/%s/file' % image_id, headers={'Content-Type': 'application/octet-stream'}, data=b'IMAGEDATA') self.assertEqual(204, response.status_code) output[visibility] = image_id return output def list_cache(self, expected_code=200): path = '/v2/cache' response = self.api_get(path) self.assertEqual(expected_code, response.status_code) if response.status_code == 200: return response.json def cache_queue(self, image_id, expected_code=202): # Queue image for prefetching path = '/v2/cache/%s' % image_id response = self.api_put(path) self.assertEqual(expected_code, response.status_code) def cache_delete(self, image_id, expected_code=204): path = '/v2/cache/%s' % image_id response = self.api_delete(path) self.assertEqual(expected_code, response.status_code) def cache_clear(self, target='', expected_code=204): path = '/v2/cache' headers = {} if target: headers['x-image-cache-clear-target'] = target response = self.api_delete(path, headers=headers) self.assertEqual(expected_code, response.status_code) def wait_for_caching(self, image_id, max_sec=10, delay_sec=0.2, start_delay_sec=None): start_time = time.time() done_time = start_time + max_sec if start_delay_sec: time.sleep(start_delay_sec) while time.time() <= done_time: output = self.list_cache()['cached_images'] output = [image['image_id'] for image in output] if output and image_id in output: return time.sleep(delay_sec) msg = "Image {0} failed to be cached within {1} sec" raise Exception(msg.format(image_id, max_sec)) def test_cache_list(self): self.start_server(enable_cache=True) images = self.load_data() # Ensure that nothing is cached and nothing is queued for caching output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) # Queue 1 image for caching self.cache_queue(images['public']) output = self.list_cache() self.assertEqual(1, len(output['queued_images']))
self.assertEqual(0, len(output['cached_images'])) def test_cache_queue(self): self.start_server(enable_cache=True) images = self.load_data() # Ensure that nothing is cached and nothing is queued for caching output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) # Queue 1 image for caching self.cache_queue(images['public']) # NOTE(abhishekk): As queue call will immediately start caching # the image, lets wait for completion. self.wait_for_caching(images['public']) # Now verify that we have 1 cached image output = self.list_cache() self.assertEqual(1, len(output['cached_images'])) # Verify same image is cached self.assertIn(images['public'], output['cached_images'][0]['image_id']) def test_cache_delete(self): self.start_server(enable_cache=True) images = self.load_data() # Queue 1 image for caching self.cache_queue(images['public']) self.wait_for_caching(images['public']) # Now verify that we have 1 cached image output = self.list_cache() self.assertEqual(1, len(output['cached_images'])) # Verify same image is cached self.assertIn(images['public'], output['cached_images'][0]['image_id']) # Delete cached image self.cache_delete(images['public']) # Now verify that we have 0 cached image output = self.list_cache() self.assertEqual(0, len(output['cached_images'])) def test_cache_clear_queued_images(self): self.start_server(enable_cache=True) images = self.load_data() # Queue 2 images for caching self.cache_queue(images['public']) self.cache_queue(images['private']) # Now verify that we have 2 queued images # NOTE(abhishekk): We might fail with race here as queue call # will immediately start caching of an image, so we may not find # all images in queued state. output = self.list_cache() self.assertEqual(2, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) # Clear all images from cache self.cache_clear(target='queue') # Now verify that we have 0 queued images output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) def test_cache_clear_cached_images(self): self.start_server(enable_cache=True) images = self.load_data() # Queue 2 images for caching self.cache_queue(images['public']) self.cache_queue(images['private']) self.wait_for_caching(images['public']) self.wait_for_caching(images['private']) # Now verify that we have 2 cached images output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(2, len(output['cached_images'])) # Clear all images from cache self.cache_clear(target='cache') # Now verify that we have 0 cached images output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) def test_cache_clear(self): self.start_server(enable_cache=True) images = self.load_data() # Queue 2 images for caching self.cache_queue(images['public']) self.wait_for_caching(images['public']) # Now verify that we have 1 cached images output = self.list_cache() self.assertEqual(1, len(output['cached_images'])) self.cache_queue(images['private']) # Now verify that we have 1 queued and 1 cached images output = self.list_cache() # NOTE(abhishekk): We might fail with race here as queue call # will immediately start caching of an image, so we may not find # image in queued state. 
self.assertEqual(1, len(output['queued_images'])) self.assertEqual(1, len(output['cached_images'])) # Clear all images from cache self.cache_clear() # Now verify that we have 0 queued and cached images output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) def test_cache_api_negative_scenarios(self): self.start_server(enable_cache=True) images = self.load_data() # Try non-existing image to queue for caching self.cache_queue('non-existing-image-id', expected_code=404) # Verify that you cannot queue a non-active image self.cache_queue(images['queued'], expected_code=400) # Try to delete non-existing image from cache self.cache_delete('non-existing-image-id', expected_code=404) # Verify clearing cache fails with 400 if invalid header is passed self.cache_clear(target='both', expected_code=400) def test_cache_image_queue_delete(self): # This test verifies that if an image is queued for caching # and the user deletes the original image, it remains in the # queued list and can be removed with the cache-delete API. self.start_server(enable_cache=True) images = self.load_data() # Ensure that nothing is cached and nothing is queued for caching output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) self.cache_queue(images['public']) # Now verify that we have 1 image queued for caching and 0 # cached images output = self.list_cache() self.assertEqual(1, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) # Verify same image is queued for caching self.assertIn(images['public'], output['queued_images']) # Delete image and verify that it is still present # in queued list path = '/v2/images/%s' % images['public'] response = self.api_delete(path) self.assertEqual(204, response.status_code) output = self.list_cache() self.assertEqual(1, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) self.assertIn(images['public'], output['queued_images']) # Delete the image from the queued list self.cache_delete(images['public']) output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) def test_cache_image_cache_delete(self): # This test verifies that if an image is cached and the user # deletes the original image, it is also removed from the cache.
self.start_server(enable_cache=True) images = self.load_data() # Ensure that nothing is cached and nothing is queued for caching output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) self.cache_queue(images['public']) # wait for caching the image self.wait_for_caching(images['public']) # Now verify that we have 0 queued image and 1 cached image output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(1, len(output['cached_images'])) # Verify same image cached self.assertIn(images['public'], output['cached_images'][0]['image_id']) # Delete image and verify that it is deleted from # cache as well path = '/v2/images/%s' % images['public'] response = self.api_delete(path) self.assertEqual(204, response.status_code) output = self.list_cache() self.assertEqual(0, len(output['queued_images'])) self.assertEqual(0, len(output['cached_images'])) def test_cache_api_cache_disabled(self): self.start_server(enable_cache=False) images = self.load_data() # As cache is not enabled each API call should return 404 response self.list_cache(expected_code=404) self.cache_queue(images['public'], expected_code=404) self.cache_delete(images['public'], expected_code=404) self.cache_clear(expected_code=404) self.cache_clear(target='both', expected_code=404) # Now disable cache policies and ensure that you will get 403 self.set_policy_rules({ 'cache_list': '!', 'cache_delete': '!', 'cache_image': '!', 'add_image': '', 'upload_image': '' }) self.list_cache(expected_code=403) self.cache_queue(images['public'], expected_code=403) self.cache_delete(images['public'], expected_code=403) self.cache_clear(expected_code=403) self.cache_clear(target='both', expected_code=403) def test_cache_api_not_allowed(self): self.start_server(enable_cache=True) images = self.load_data() # As cache operations are not allowed each API call should return # 403 response self.set_policy_rules({ 'cache_list': '!', 'cache_delete': '!', 'cache_image': '!', 'add_image': '', 'upload_image': '' }) self.list_cache(expected_code=403) self.cache_queue(images['public'], expected_code=403) self.cache_delete(images['public'], expected_code=403) self.cache_clear(expected_code=403) self.cache_clear(target='both', expected_code=403) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_cache_api_policy.py0000664000175000017500000002106700000000000025034 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
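# Context for the tests below: unlike test_cache_api.py, these drive the
# older cache-manage endpoints (/v2/queued_images, /v2/cached_images), all
# guarded by the single 'manage_image_cache' policy rule; set_policy_rules()
# flips that rule to '!' to verify every operation then returns 403.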
from unittest import mock import oslo_policy.policy from glance.api import policy from glance.image_cache import prefetcher from glance.tests import functional class TestCacheImagesPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestCacheImagesPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestCacheImagesPolicy, self).start_server(enable_cache=True) def _create_upload_and_cache(self, cache_image=False, expected_code=200): image_id = self._create_and_upload() # Queue image for caching path = '/v2/queued_images/%s' % image_id response = self.api_put(path) self.assertEqual(expected_code, response.status_code) if cache_image: # NOTE(abhishekk): Here we are not running periodic job which # caches queued images as precaching is not part of this # patch, so to test all caching operations we are using this # way to cache images for us cache_prefetcher = prefetcher.Prefetcher() cache_prefetcher.run() return image_id def test_queued_images(self): self.start_server() # Verify that you can queue image for caching self._create_upload_and_cache(expected_code=200) # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!', 'add_image': '', 'upload_image': '' }) self._create_upload_and_cache(expected_code=403) def test_get_queued_images(self): self.start_server() # Create image and queue it for caching image_id = self._create_upload_and_cache() # make sure you are able to get queued images path = '/v2/queued_images' response = self.api_get(path) self.assertEqual(200, response.status_code) output = response.json self.assertIn(image_id, output['queued_images']) # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!' }) response = self.api_get(path) self.assertEqual(403, response.status_code) def test_delete_queued_image(self): self.start_server() # Create image and queue it for caching image_id = self._create_upload_and_cache() # Create another image while you can second_image_id = self._create_upload_and_cache() # make sure you are able to delete queued image path = '/v2/queued_images/%s' % image_id response = self.api_delete(path) self.assertEqual(200, response.status_code) # verify image is deleted from queue list path = '/v2/queued_images' response = self.api_get(path) output = response.json self.assertNotIn(image_id, output['queued_images']) # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!' 
}) path = '/v2/queued_images/%s' % second_image_id response = self.api_delete(path) self.assertEqual(403, response.status_code) def test_delete_queued_images(self): self.start_server() # Create image and queue it for caching self._create_upload_and_cache() # Create another image while you can self._create_upload_and_cache() # make sure you are able to delete queued image path = '/v2/queued_images' response = self.api_delete(path) self.assertEqual(200, response.status_code) # verify images are deleted from queue list path = '/v2/queued_images' response = self.api_get(path) output = response.json self.assertEqual([], output['queued_images']) # Create another image and queue it for caching image_id = self._create_upload_and_cache() # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!' }) path = '/v2/queued_images' response = self.api_delete(path) self.assertEqual(403, response.status_code) # Verify that image is still present in queue list self.set_policy_rules({ 'manage_image_cache': '', }) path = '/v2/queued_images' response = self.api_get(path) output = response.json self.assertIn(image_id, output['queued_images']) def test_get_cached_images(self): self.start_server() # Create image and cache it image_id = self._create_upload_and_cache(cache_image=True) # make sure you are able to get cached images path = '/v2/cached_images' response = self.api_get(path) self.assertEqual(200, response.status_code) output = response.json self.assertEqual(image_id, output['cached_images'][0]['image_id']) # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!' }) response = self.api_get(path) self.assertEqual(403, response.status_code) def test_delete_cached_image(self): self.start_server() # Create image and cache it image_id = self._create_upload_and_cache(cache_image=True) # Create another image while you can second_image_id = self._create_upload_and_cache(cache_image=True) # make sure you are able to delete cached image path = '/v2/cached_images/%s' % image_id response = self.api_delete(path) self.assertEqual(200, response.status_code) # verify image is deleted from cached list path = '/v2/cached_images' response = self.api_get(path) output = response.json self.assertEqual(1, len(output['cached_images'])) # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!' }) path = '/v2/cached_images/%s' % second_image_id response = self.api_delete(path) self.assertEqual(403, response.status_code) def test_delete_cached_images(self): self.start_server() # Create image and cache it self._create_upload_and_cache(cache_image=True) # Create another image while you can self._create_upload_and_cache(cache_image=True) # make sure you are able to delete cached image path = '/v2/cached_images' response = self.api_delete(path) self.assertEqual(200, response.status_code) # verify images are deleted from cached list path = '/v2/cached_images' response = self.api_get(path) output = response.json self.assertEqual(0, len(output['cached_images'])) # Create another image and cache it self._create_upload_and_cache(cache_image=True) # Now disable manage_image_cache to ensure you will get # 403 Forbidden error self.set_policy_rules({ 'manage_image_cache': '!' 
}) path = '/v2/cached_images' response = self.api_delete(path) self.assertEqual(403, response.status_code) # Verify that image is still present in cache self.set_policy_rules({ 'manage_image_cache': '', }) path = '/v2/cached_images' response = self.api_get(path) output = response.json self.assertEqual(1, len(output['cached_images'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_discovery.py0000664000175000017500000001430500000000000023565 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import http.client as http from oslo_utils import units from glance.quota import keystone as ks_quota from glance.tests import functional from glance.tests.functional.v2.test_images import get_enforcer_class from glance.tests import utils as test_utils class TestDiscovery(functional.SynchronousAPIBase): def setUp(self): super(TestDiscovery, self).setUp() self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) self.enforcer_mock = self.useFixture( fixtures.MockPatchObject(ks_quota, 'limit')).mock def set_limit(self, limits): self.enforcer_mock.Enforcer = get_enforcer_class(limits) def _assert_usage(self, expected): usage = self.api_get('/v2/info/usage') usage = usage.json['usage'] for item in ('count', 'size', 'stage'): key = 'image_%s_total' % item self.assertEqual(expected[key], usage[key], 'Mismatch in %s' % key) self.assertEqual(expected['image_count_uploading'], usage['image_count_uploading']) def test_quota_with_usage(self): self.set_limit({'image_size_total': 5, 'image_count_total': 10, 'image_stage_total': 15, 'image_count_uploading': 20}) self.start_server() # Initially we expect no usage, but our limits in place. expected = { 'image_size_total': {'limit': 5, 'usage': 0}, 'image_count_total': {'limit': 10, 'usage': 0}, 'image_stage_total': {'limit': 15, 'usage': 0}, 'image_count_uploading': {'limit': 20, 'usage': 0}, } self._assert_usage(expected) # Stage 1MiB and see our total count, uploading count, and # staging area usage increase. data = test_utils.FakeData(1 * units.Mi) image_id = self._create_and_stage(data_iter=data) expected['image_count_uploading']['usage'] = 1 expected['image_count_total']['usage'] = 1 expected['image_stage_total']['usage'] = 1 self._assert_usage(expected) # Doing the import does not change anything (since we are # synchronous and the task will not have run yet). self._import_direct(image_id, ['store1']) self._assert_usage(expected) # After the import is complete, our usage of the staging area # drops to zero, and our consumption of actual store space # reflects the new active image. self._wait_for_import(image_id) expected['image_count_uploading']['usage'] = 0 expected['image_stage_total']['usage'] = 0 expected['image_size_total']['usage'] = 1 self._assert_usage(expected) # Upload also yields a new active image and store usage. 
data = test_utils.FakeData(1 * units.Mi) image_id = self._create_and_upload(data_iter=data) expected['image_count_total']['usage'] = 2 expected['image_size_total']['usage'] = 2 self._assert_usage(expected) # Deleting an image drops the usage down. self.api_delete('/v2/images/%s' % image_id) expected['image_count_total']['usage'] = 1 expected['image_size_total']['usage'] = 1 self._assert_usage(expected) def test_stores(self): # NOTE(mrjoshi): As this is a functional test, we are # testing the functionality with file stores. self.start_server() # If user is admin or non-admin the store list will be # displayed. stores = self.api_get('/v2/info/stores').json['stores'] expected = { "stores": [ { "id": "store1", "default": "true" }, { "id": "store2" }, { "id": "store3" }]} self.assertEqual(expected['stores'], stores) # If user is admin the store list will be displayed # along with store properties. stores = self.api_get('/v2/info/stores/detail').json['stores'] expected = { "stores": [ { "id": "store1", "default": "true", "type": "file", "weight": 0, "properties": { "data_dir": self._store_dir('store1'), "chunk_size": 65536, "thin_provisioning": False } }, { "id": "store2", "type": "file", "weight": 0, "properties": { "data_dir": self._store_dir('store2'), "chunk_size": 65536, "thin_provisioning": False } }, { "id": "store3", "type": "file", "weight": 0, "properties": { "data_dir": self._store_dir('store3'), "chunk_size": 65536, "thin_provisioning": False } }]} self.assertEqual(expected['stores'], stores) # If user is non-admin 403 Error response will be returned. response = self.api_get('/v2/info/stores/detail', headers={'X-Roles': 'member'}) self.assertEqual(http.FORBIDDEN, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_images.py0000664000175000017500000127226500000000000023037 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import http.client as http import os import subprocess import tempfile import time import urllib import uuid import fixtures import glance_store from oslo_config import cfg from oslo_limit import exception as ol_exc from oslo_limit import limit from oslo_serialization import jsonutils from oslo_utils.secretutils import md5 from oslo_utils import units import requests from glance.common import wsgi from glance.quota import keystone as ks_quota from glance.tests import functional from glance.tests.functional import ft_utils as func_utils from glance.tests import utils as test_utils CONF = cfg.CONF TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) TENANT3 = str(uuid.uuid4()) TENANT4 = str(uuid.uuid4()) def get_auth_header(tenant, tenant_id=None, role='reader,member', headers=None): """Return headers to authenticate as a specific tenant. 
:param tenant: Tenant for the auth token :param tenant_id: Optional tenant ID for the X-Tenant-Id header :param role: Optional user role :param headers: Optional list of headers to add to """ if not headers: headers = {} auth_token = 'user:%s:%s' % (tenant, role) headers.update({'X-Auth-Token': auth_token}) if tenant_id: headers.update({'X-Tenant-Id': tenant_id}) return headers class TestImages(functional.FunctionalTest): def setUp(self): super(TestImages, self).setUp() self.cleanup() self.include_scrubber = False self.api_server.deployment_flavor = 'noauth' for i in range(3): ret = test_utils.start_http_server("foo_image_id%d" % i, "foo_image%d" % i) setattr(self, 'http_server%d' % i, ret[1]) setattr(self, 'http_port%d' % i, ret[2]) def tearDown(self): for i in range(3): httpd = getattr(self, 'http_server%d' % i, None) if httpd: httpd.shutdown() httpd.server_close() super(TestImages, self).tearDown() def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def test_image_import_using_glance_direct(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # glance-direct should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("glance-direct", discovery_calls) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'os_hidden', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size', ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Upload some image data to staging area path = self._url('/v2/images/%s/stage' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) image_data = b'ZZZZZ' response = 
requests.put(path, headers=headers, data=image_data) self.assertEqual(http.NO_CONTENT, response.status_code) # Verify image is in uploading state, hashes are None func_utils.verify_image_hashes_and_status(self, image_id, size=len(image_data), status='uploading') # Import image to store path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin', }) data = jsonutils.dumps({'method': { 'name': 'glance-direct' }}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=10, delay_sec=0.2) expect_c = str(md5(image_data, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(image_data).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(image_data), status='active') # Ensure the size is updated to reflect the data uploaded path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(5, jsonutils.loads(response.text)['size']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_import_using_web_download(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # web-download should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("web-download", discovery_calls) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'os_hidden', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size', ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' 
% image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and hashes are None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to store path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin', }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps({'method': { 'name': 'web-download', 'uri': image_data_uri }}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=20, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_lifecycle(self): # Image list should be empty self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'abc': 'xyz', 'protected': True}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image_location_header = response.headers['Location'] # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'os_hidden', 'id', 'file', 'min_disk', 'foo', 'abc', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size', 
'locations', ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': True, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'foo': 'bar', 'abc': 'xyz', 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Create another image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', 'bar': 'foo', 'disk_format': 'aki', 'container_format': 'aki', 'xyz': 'abc'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image2_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'os_hidden', 'id', 'file', 'min_disk', 'bar', 'xyz', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size', 'locations', ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-2', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image2_id, 'protected': False, 'file': '/v2/images/%s/file' % image2_id, 'min_disk': 0, 'bar': 'foo', 'xyz': 'abc', 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have two entries path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # Image list should list only image-2 as image-1 doesn't contain the # property 'bar' path = self._url('/v2/images?bar=foo') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Image list should list only image-1 as image-2 doesn't contain the # property 'foo' path = self._url('/v2/images?foo=bar') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # The "changes-since" filter shouldn't work on glance v2 path = self._url('/v2/images?changes-since=20001007T10:10:10') response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) path = self._url('/v2/images?changes-since=aaa') response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) # Image list should list only image-1 based on the filter # 
'protected=true' path = self._url('/v2/images?protected=true') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Image list should list only image-2 based on the filter # 'protected=false' path = self._url('/v2/images?protected=false') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Image list should return 400 based on the filter # 'protected=False' path = self._url('/v2/images?protected=False') response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) # Image list should list only image-1 based on the filter # 'foo=bar&abc=xyz' path = self._url('/v2/images?foo=bar&abc=xyz') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Image list should list only image-2 based on the filter # 'bar=foo&xyz=abc' path = self._url('/v2/images?bar=foo&xyz=abc') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Image list should not list anything as the filter 'foo=baz&abc=xyz' # is not satisfied by either images path = self._url('/v2/images?foo=baz&abc=xyz') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Get the image using the returned Location header response = requests.get(image_location_header, headers=self._headers()) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertEqual(image_id, image['id']) self.assertIsNone(image['checksum']) self.assertIsNone(image['size']) self.assertIsNone(image['virtual_size']) self.assertEqual('bar', image['foo']) self.assertTrue(image['protected']) self.assertEqual('kernel', image['type']) self.assertTrue(image['created_at']) self.assertTrue(image['updated_at']) self.assertEqual(image['updated_at'], image['created_at']) # The URI file:// should return a 400 rather than a 500 path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) url = ('file://') changes = [{ 'op': 'add', 'path': '/locations/-', 'value': { 'url': url, 'metadata': {} } }] data = jsonutils.dumps(changes) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) # The image should be mutable, including adding and removing properties path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/name', 'value': 'image-2'}, {'op': 'replace', 'path': '/disk_format', 'value': 'vhd'}, {'op': 'replace', 'path': '/container_format', 'value': 'ami'}, {'op': 'replace', 'path': '/foo', 'value': 'baz'}, {'op': 'add', 'path': '/ping', 'value': 
'pong'}, {'op': 'replace', 'path': '/protected', 'value': True}, {'op': 'remove', 'path': '/type'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertEqual('image-2', image['name']) self.assertEqual('vhd', image['disk_format']) self.assertEqual('baz', image['foo']) self.assertEqual('pong', image['ping']) self.assertTrue(image['protected']) self.assertNotIn('type', image, response.text) # Adding 11 image properties should fail since configured limit is 10 path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) changes = [] for i in range(11): changes.append({'op': 'add', 'path': '/ping%i' % i, 'value': 'pong'}) data = jsonutils.dumps(changes) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code, response.text) # Adding 3 image locations should fail since configured limit is 2 path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) changes = [] for i in range(3): url = ('http://127.0.0.1:%s/foo_image' % getattr(self, 'http_port%d' % i)) changes.append({'op': 'add', 'path': '/locations/-', 'value': {'url': url, 'metadata': {}}, }) data = jsonutils.dumps(changes) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code, response.text) # Ensure the v2.0 json-patch content type is accepted path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.0-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([{'add': '/ding', 'value': 'dong'}]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertEqual('dong', image['ding']) # Updates should persist across requests path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertEqual(image_id, image['id']) self.assertEqual('image-2', image['name']) self.assertEqual('baz', image['foo']) self.assertEqual('pong', image['ping']) self.assertTrue(image['protected']) self.assertNotIn('type', image, response.text) # Try to download data before its uploaded path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) # Upload some image data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) image_data = b'ZZZZZ' response = requests.put(path, headers=headers, data=image_data) self.assertEqual(http.NO_CONTENT, response.status_code) expect_c = str(md5(image_data, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(image_data).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, expect_c, expect_h, 'active', size=len(image_data)) # `disk_format` and `container_format` cannot # be replaced when the image is active. 
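        # (Once an image is active, its data has already been written to
        # the store in a concrete format, so letting either field change
        # would make the metadata disagree with the stored bits; the API
        # is expected to reject such PATCH attempts with 403 Forbidden,
        # as asserted below.)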
immutable_paths = ['/disk_format', '/container_format'] media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) path = self._url('/v2/images/%s' % image_id) for immutable_path in immutable_paths: data = jsonutils.dumps([ {'op': 'replace', 'path': immutable_path, 'value': 'ari'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Try to download the data that was just uploaded path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(expect_c, response.headers['Content-MD5']) self.assertEqual('ZZZZZ', response.text) # Uploading duplicate data should be rejected with a 409. The # original data should remain untouched. path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='XXX') self.assertEqual(http.CONFLICT, response.status_code) func_utils.verify_image_hashes_and_status(self, image_id, expect_c, expect_h, 'active', size=len(image_data)) # Ensure the size is updated to reflect the data uploaded path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(5, jsonutils.loads(response.text)['size']) # Should be able to deactivate image path = self._url('/v2/images/%s/actions/deactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Change the image to public so TENANT2 can see it path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.0-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([{"replace": "/visibility", "value": "public"}]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Tenant2 should get Forbidden when deactivating the public image path = self._url('/v2/images/%s/actions/deactivate' % image_id) response = requests.post(path, data={}, headers=self._headers( {'X-Tenant-Id': TENANT2})) self.assertEqual(http.FORBIDDEN, response.status_code) # Tenant2 should get Forbidden when reactivating the public image path = self._url('/v2/images/%s/actions/reactivate' % image_id) response = requests.post(path, data={}, headers=self._headers( {'X-Tenant-Id': TENANT2})) self.assertEqual(http.FORBIDDEN, response.status_code) # Deactivating a deactivated image succeeds (no-op) path = self._url('/v2/images/%s/actions/deactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Can't download a deactivated image path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.FORBIDDEN, response.status_code) # Deactivated image should still be in a listing path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # Should be able to reactivate a deactivated image path = 
self._url('/v2/images/%s/actions/reactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Reactivating an active image succeeds (no-op) path = self._url('/v2/images/%s/actions/reactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Deletion should not work on protected images path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.FORBIDDEN, response.status_code) # Unprotect image for deletion path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [{'op': 'replace', 'path': '/protected', 'value': False}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Deletion should work. Deleting image-1 path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # And neither should its data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) # Image list should now contain just image-2 path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Deleting image-2 should work path = self._url('/v2/images/%s' % image2_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create image that tries to send True should return 400 path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = 'true' response = requests.post(path, headers=headers, data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # Create image that tries to send a string should return 400 path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = '"hello"' response = requests.post(path, headers=headers, data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # Create image that tries to send 123 should return 400 path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = '123' response = requests.post(path, headers=headers, data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) self.stop_servers() def _create_qcow(self, size): fn = tempfile.mktemp(prefix='glance-unittest-images-', suffix='.qcow2') subprocess.check_output( 'qemu-img create -f qcow2 %s %i' % (fn, size), shell=True) return fn def test_image_upload_qcow_virtual_size_calculation(self): self.start_servers(**self.__dict__.copy()) # 
Create an image headers = self._headers({'Content-Type': 'application/json'}) data = jsonutils.dumps({'name': 'myqcow', 'disk_format': 'qcow2', 'container_format': 'bare'}) response = requests.post(self._url('/v2/images'), headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code, 'Failed to create: %s' % response.text) image = response.json() # Upload a qcow fn = self._create_qcow(128 * units.Mi) raw_size = os.path.getsize(fn) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(self._url('/v2/images/%s/file' % image['id']), headers=headers, data=open(fn, 'rb').read()) os.remove(fn) self.assertEqual(http.NO_CONTENT, response.status_code) # Check the image attributes response = requests.get(self._url('/v2/images/%s' % image['id']), headers=self._headers()) self.assertEqual(http.OK, response.status_code) image = response.json() self.assertEqual(128 * units.Mi, image['virtual_size']) self.assertEqual(raw_size, image['size']) def test_image_import_qcow_virtual_size_calculation(self): self.start_servers(**self.__dict__.copy()) # Create an image headers = self._headers({'Content-Type': 'application/json'}) data = jsonutils.dumps({'name': 'myqcow', 'disk_format': 'qcow2', 'container_format': 'bare'}) response = requests.post(self._url('/v2/images'), headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code, 'Failed to create: %s' % response.text) image = response.json() # Stage a qcow fn = self._create_qcow(128 * units.Mi) raw_size = os.path.getsize(fn) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(self._url('/v2/images/%s/stage' % image['id']), headers=headers, data=open(fn, 'rb').read()) os.remove(fn) self.assertEqual(http.NO_CONTENT, response.status_code) # Verify image is in uploading state and checksum is None func_utils.verify_image_hashes_and_status(self, image['id'], status='uploading', size=raw_size) # Import image to store path = self._url('/v2/images/%s/import' % image['id']) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin', }) data = jsonutils.dumps({'method': { 'name': 'glance-direct' }}) response = requests.post( self._url('/v2/images/%s/import' % image['id']), headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. 
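        # wait_for_status() below polls the image until it reaches the
        # expected status or the timeout expires; a minimal sketch of the
        # pattern it is assumed to implement (illustration only, not the
        # actual ft_utils code):
        #
        #     deadline = time.monotonic() + max_sec
        #     while time.monotonic() < deadline:
        #         image = jsonutils.loads(
        #             requests.get(path, headers=headers).text)
        #         if image['status'] == 'active':
        #             break
        #         time.sleep(delay_sec)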
path = self._url('/v2/images/%s' % image['id']) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=15, delay_sec=0.2) # Check the image attributes response = requests.get(self._url('/v2/images/%s' % image['id']), headers=self._headers()) self.assertEqual(http.OK, response.status_code) image = response.json() self.assertEqual(128 * units.Mi, image['virtual_size']) self.assertEqual(raw_size, image['size']) def test_hidden_images(self): # Image list should be empty self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki', 'protected': False}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'os_hidden', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size', 'locations', ]) self.assertEqual(checked_keys, set(image.keys())) # Returned image entity should have os_hidden as False expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'os_hidden': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Create another image with hidden true path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki', 'os_hidden': True}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image2_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'os_hidden', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'os_hash_algo', 'os_hash_value', 'size', 'virtual_size', 'locations', ]) self.assertEqual(checked_keys, set(image.keys())) # Returned image entity should have os_hidden as True expected_image = { 'status': 'queued', 'name': 'image-2', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image2_id, 'protected': False, 'os_hidden': True, 'file': '/v2/images/%s/file' % image2_id, 'min_disk': 0, 'type': 'kernel', 
            'min_ram': 0,
            'schema': '/v2/schemas/image',
        }
        for key, value in expected_image.items():
            self.assertEqual(value, image[key], key)

        # Image list should now have one entry
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_id, images[0]['id'])

        # Image list should show one image based on the filter
        # 'hidden=false'
        path = self._url('/v2/images?os_hidden=false')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_id, images[0]['id'])

        # Image list should show one image based on the filter
        # 'hidden=true'
        path = self._url('/v2/images?os_hidden=true')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image2_id, images[0]['id'])

        # Image list should return 400 based on the filter
        # 'hidden=abcd'
        path = self._url('/v2/images?os_hidden=abcd')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.BAD_REQUEST, response.status_code)

        # Upload some image data to image-1
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        image_data = b'ZZZZZ'
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        expect_c = str(md5(image_data, usedforsecurity=False).hexdigest())
        expect_h = str(hashlib.sha512(image_data).hexdigest())
        func_utils.verify_image_hashes_and_status(self, image_id, expect_c,
                                                  expect_h,
                                                  size=len(image_data),
                                                  status='active')

        # Upload some image data to image-2
        path = self._url('/v2/images/%s/file' % image2_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        image_data = b'WWWWW'
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        expect_c = str(md5(image_data, usedforsecurity=False).hexdigest())
        expect_h = str(hashlib.sha512(image_data).hexdigest())
        func_utils.verify_image_hashes_and_status(self, image2_id, expect_c,
                                                  expect_h,
                                                  size=len(image_data),
                                                  status='active')

        # Hide image-1
        path = self._url('/v2/images/%s' % image_id)
        media_type = 'application/openstack-images-v2.1-json-patch'
        headers = self._headers({'content-type': media_type})
        data = jsonutils.dumps([
            {'op': 'replace', 'path': '/os_hidden', 'value': True},
        ])
        response = requests.patch(path, headers=headers, data=data)
        self.assertEqual(http.OK, response.status_code, response.text)

        # Returned image entity should reflect the changes
        image = jsonutils.loads(response.text)
        self.assertTrue(image['os_hidden'])

        # Image list should now have 0 entries
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Image list should show image-1 and image-2 based
        # on the filter 'hidden=true'
        path = self._url('/v2/images?os_hidden=true')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(2, len(images))
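        # NOTE: image lists are sorted by created_at in descending order
        # by default, so the more recently created image-2 is expected to
        # come first; the two index-based assertions below rely on that
        # ordering.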
self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # Un-Hide image-1 path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/os_hidden', 'value': False}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertFalse(image['os_hidden']) # Image list should now have 1 entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Deleting image-1 should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Deleting image-2 should work path = self._url('/v2/images/%s' % image2_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_update_readonly_prop(self): self.start_servers(**self.__dict__.copy()) # Create an image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1'}) response = requests.post(path, headers=headers, data=data) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) props = ['/id', '/file', '/location', '/schema', '/self'] for prop in props: doc = [{'op': 'replace', 'path': prop, 'value': 'value1'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) for prop in props: doc = [{'op': 'remove', 'path': prop, 'value': 'value1'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) for prop in props: doc = [{'op': 'add', 'path': prop, 'value': 'value1'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) self.stop_servers() def test_methods_that_dont_accept_illegal_bodies(self): # Check images can be reached self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) # Test all the schemas schema_urls = [ '/v2/schemas/images', '/v2/schemas/image', '/v2/schemas/members', '/v2/schemas/member', ] for value in schema_urls: path = self._url(value) data = jsonutils.dumps(["body"]) response = requests.get(path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # Create image for use with tests path = self._url('/v2/images') headers = self._headers({'content-type': 
'application/json'}) data = jsonutils.dumps({'name': 'image'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] test_urls = [ ('/v2/images/%s', 'get'), ('/v2/images/%s/actions/deactivate', 'post'), ('/v2/images/%s/actions/reactivate', 'post'), ('/v2/images/%s/tags/mytag', 'put'), ('/v2/images/%s/tags/mytag', 'delete'), ('/v2/images/%s/members', 'get'), ('/v2/images/%s/file', 'get'), ('/v2/images/%s', 'delete'), ] for link, method in test_urls: path = self._url(link % image_id) data = jsonutils.dumps(["body"]) response = getattr(requests, method)( path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # DELETE /images/imgid without legal json path = self._url('/v2/images/%s' % image_id) data = '{"hello"]' response = requests.delete(path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # POST /images/imgid/members path = self._url('/v2/images/%s/members' % image_id) data = jsonutils.dumps({'member': TENANT3}) response = requests.post(path, headers=self._headers(), data=data) self.assertEqual(http.OK, response.status_code) # GET /images/imgid/members/memid path = self._url('/v2/images/%s/members/%s' % (image_id, TENANT3)) data = jsonutils.dumps(["body"]) response = requests.get(path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # DELETE /images/imgid/members/memid path = self._url('/v2/images/%s/members/%s' % (image_id, TENANT3)) data = jsonutils.dumps(["body"]) response = requests.delete(path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) self.stop_servers() def test_download_random_access_w_range_request(self): """ Test partial download 'Range' requests for images (random image access) """ self.start_servers(**self.__dict__.copy()) # Create an image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', 'bar': 'foo', 'disk_format': 'aki', 'container_format': 'aki', 'xyz': 'abc'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # Upload data to image image_data = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data=image_data) self.assertEqual(http.NO_CONTENT, response.status_code) # test for success on satisfiable Range request. range_ = 'bytes=3-10' headers = self._headers({'Range': range_}) path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.PARTIAL_CONTENT, response.status_code) self.assertEqual('DEFGHIJK', response.text) # test for failure on unsatisfiable Range request. 
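        # ('bytes=10-5' puts the last-byte-pos before the first-byte-pos,
        # which is not a valid byte range per RFC 7233, so the request is
        # unsatisfiable and a 416 response is expected.)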
        range_ = 'bytes=10-5'
        headers = self._headers({'Range': range_})
        path = self._url('/v2/images/%s/file' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(http.REQUESTED_RANGE_NOT_SATISFIABLE,
                         response.status_code)

        self.stop_servers()

    def test_download_random_access_w_content_range(self):
        """
        Even though Content-Range is incorrect on requests, we support it
        for backward compatibility with clients written for pre-Pike
        Glance. The following test covers such 'Content-Range' requests,
        for which we have to guard against regressions.
        """
        self.start_servers(**self.__dict__.copy())
        # Create another image (with two deployer-defined properties)
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel',
                                'bar': 'foo', 'disk_format': 'aki',
                                'container_format': 'aki', 'xyz': 'abc'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)
        image = jsonutils.loads(response.text)
        image_id = image['id']

        # Upload data to image
        image_data = 'Z' * 15
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        result_body = ''
        for x in range(15):
            # NOTE(flaper87): Read just 1 byte. Content-Range is
            # 0-indexed and it specifies the first byte to read
            # and the last byte to read.
            content_range = 'bytes %s-%s/15' % (x, x)
            headers = self._headers({'Content-Range': content_range})
            path = self._url('/v2/images/%s/file' % image_id)
            response = requests.get(path, headers=headers)
            self.assertEqual(http.PARTIAL_CONTENT, response.status_code)
            result_body += response.text

        self.assertEqual(result_body, image_data)

        # test for failure on an unsatisfiable Content-Range request.
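        # ('bytes 3-16/15' asks for bytes past the end of the 15-byte
        # image, so the request cannot be satisfied and a 416 response is
        # expected.)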
content_range = 'bytes 3-16/15' headers = self._headers({'Content-Range': content_range}) path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.REQUESTED_RANGE_NOT_SATISFIABLE, response.status_code) self.stop_servers() def test_download_policy_when_cache_is_not_enabled(self): rules = {'context_is_admin': 'role:admin', 'default': '', 'add_image': '', 'get_image': '', 'modify_image': '', 'upload_image': '', 'delete_image': '', 'download_image': '!'} self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Upload data to image path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(http.NO_CONTENT, response.status_code) # Get an image should fail path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.get(path, headers=headers) self.assertEqual(http.FORBIDDEN, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_download_image_not_allowed_using_restricted_policy(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Upload data to image path = self._url('/v2/images/%s/file' % image_id) headers = 
self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(http.NO_CONTENT, response.status_code) # Get an image should fail path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream', 'X-Roles': '_member_'}) response = requests.get(path, headers=headers) self.assertEqual(http.FORBIDDEN, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_download_image_allowed_using_restricted_policy(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Upload data to image path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(http.NO_CONTENT, response.status_code) # Get an image should be allowed path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream', 'X-Roles': 'reader,member'}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_download_image_raises_service_unavailable(self): """Test image download returns HTTPServiceUnavailable.""" self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get 
image id image = jsonutils.loads(response.text) image_id = image['id'] # Update image locations via PATCH path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) thread, httpd, http_port = test_utils.start_http_server(image_id, "image-1") values = [{'url': 'http://127.0.0.1:%s/image-1' % http_port, 'metadata': {'idx': '0'}}] doc = [{'op': 'replace', 'path': '/locations', 'value': values}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) # Download an image should work path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) # Stop http server used to update image location httpd.shutdown() httpd.server_close() # Download an image should raise HTTPServiceUnavailable path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(http.SERVICE_UNAVAILABLE, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_image_modification_works_for_owning_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "project_id:%(owner)s", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers['content-type'] = media_type del headers['X-Roles'] data = jsonutils.dumps([ {'op': 'replace', 'path': '/name', 'value': 'new-name'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) self.stop_servers() def test_image_modification_fails_on_mismatched_tenant_ids(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "'A-Fake-Tenant-Id':%(owner)s", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 
'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers['content-type'] = media_type del headers['X-Roles'] data = jsonutils.dumps([ {'op': 'replace', 'path': '/name', 'value': 'new-name'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) self.stop_servers() def test_member_additions_works_for_owning_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "project_id:%(owner)s", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] # Get the image's members resource path = self._url('/v2/images/%s/members' % image_id) body = jsonutils.dumps({'member': TENANT3}) del headers['X-Roles'] response = requests.post(path, headers=headers, data=body) self.assertEqual(http.OK, response.status_code) self.stop_servers() def test_image_additions_works_only_for_specific_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "'{0}':%(owner)s".format(TENANT1), "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) headers['X-Tenant-Id'] = TENANT2 response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) self.stop_servers() def test_owning_tenant_id_can_retrieve_image_information(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "project_id:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # 
Replace the admin role with reader and member headers['X-Roles'] = 'reader,member' # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] # Can retrieve the image as TENANT1 path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) # Can retrieve the image's members as TENANT1 path = self._url('/v2/images/%s/members' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) headers['X-Tenant-Id'] = TENANT2 response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_owning_tenant_can_publicize_image(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "publicize_image": "project_id:%(owner)s", "get_image": "project_id:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT1, }) doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) def test_owning_tenant_can_communitize_image(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "communitize_image": "project_id:%(owner)s", "get_image": "project_id:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT1, }) doc = [{'op': 'replace', 'path': '/visibility', 'value': 'community'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) def test_owning_tenant_can_delete_image(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "publicize_image": "project_id:%(owner)s", "get_image": "project_id:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", 
"delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) def test_list_show_ok_when_get_location_allowed_for_admins(self): self.api_server.show_image_direct_url = True self.api_server.show_multiple_locations = True # setup context to allow a list locations by admin only rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "role:admin", "delete_image": "", "restricted": "", "download_image": "", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] # Can retrieve the image as TENANT1 path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) # Can list images as TENANT1 path = self._url('/v2/images') response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) self.stop_servers() def test_image_size_cap(self): self.api_server.image_size_cap = 128 self.start_servers(**self.__dict__.copy()) # create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-size-cap-test-image', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # try to populate it with oversized data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) class StreamSim(object): # Using a one-shot iterator to force chunked transfer in the PUT # request def __init__(self, size): self.size = size def __iter__(self): yield b'Z' * self.size response = requests.put(path, headers=headers, data=StreamSim( self.api_server.image_size_cap + 1)) self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) # hashlib.md5('Z'*129).hexdigest() # == '76522d28cb4418f12704dfa7acd6e7ee' # If the image has this checksum, it means that the whole stream was # accepted and written to the store, which should not be the case. 
path = self._url('/v2/images/{0}'.format(image_id)) headers = self._headers({'content-type': 'application/json'}) response = requests.get(path, headers=headers) image_checksum = jsonutils.loads(response.text).get('checksum') self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee') def test_permissions(self): self.start_servers(**self.__dict__.copy()) # Create an image that belongs to TENANT1 path = self._url('/v2/images') headers = self._headers({'Content-Type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'raw', 'container_format': 'bare'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image_id = jsonutils.loads(response.text)['id'] # Upload some image data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(http.NO_CONTENT, response.status_code) # TENANT1 should see the image in their list path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(image_id, images[0]['id']) # TENANT1 should be able to access the image directly path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) # TENANT2 should not see the image in their list path = self._url('/v2/images') headers = self._headers({'X-Tenant-Id': TENANT2}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # TENANT2 should not be able to access the image directly path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT2}) response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) # TENANT2 should not be able to modify the image, either path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT2, }) doc = [{'op': 'replace', 'path': '/name', 'value': 'image-2'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.NOT_FOUND, response.status_code) # TENANT2 should not be able to delete the image, either path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT2}) response = requests.delete(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) # Publicize the image as an admin of TENANT1 path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Roles': 'admin', }) doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) # TENANT3 should now see the image in their list path = self._url('/v2/images') headers = self._headers({'X-Tenant-Id': TENANT3}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(image_id, images[0]['id']) # TENANT3 should also be able to access the image directly path = 
self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT3}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) # TENANT3 still should not be able to modify the image path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT3, }) doc = [{'op': 'replace', 'path': '/name', 'value': 'image-2'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) # TENANT3 should not be able to delete the image, either path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT3}) response = requests.delete(path, headers=headers) self.assertEqual(http.FORBIDDEN, response.status_code) # Image data should still be present after the failed delete path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(response.text, 'ZZZZZ') self.stop_servers() def test_property_protections_with_roles(self): # Enable property protection self.api_server.property_protection_file = self.property_file_roles self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image for role member with extra props # Raises 403 since user is not allowed to set 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'x_owner_foo': 'o_s_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Create an image for role member without 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_owner_foo': 'o_s_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have 'x_owner_foo' image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_owner_foo': 'o_s_bar', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Create an image for role spl_role with extra props path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,spl_role'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'spl_create_prop': 'create_bar', 'spl_create_prop_policy': 'create_policy_bar', 'spl_read_prop': 'read_bar', 'spl_update_prop': 'update_bar', 'spl_delete_prop': 'delete_bar', 'spl_delete_empty_prop': ''}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) 
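# For context, a roles-format property-protections file maps a regex
# over property names to the roles allowed for each CRUD operation.
# An illustrative sketch (assumed for this walkthrough; the actual
# fixture lives in self.property_file_roles) consistent with the
# assertions below might contain:
#
#   [^spl_read_prop$]
#   create = spl_role
#   read = spl_role
#   update = !
#   delete = !
#
#   [^spl_update_prop$]
#   create = spl_role
#   read = spl_role
#   update = spl_role
#   delete = !
#
# which is why the batched patch touching spl_read_prop below is
# rejected outright while a later patch of spl_update_prop alone
# succeeds.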
image_id = image['id'] # Attempt to replace, add and remove properties which are forbidden path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,spl_role'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/spl_read_prop', 'value': 'r'}, {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) # Attempt to replace, add and remove properties which are forbidden path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,spl_role'}) data = jsonutils.dumps([ {'op': 'add', 'path': '/spl_new_prop', 'value': 'new'}, {'op': 'remove', 'path': '/spl_create_prop'}, {'op': 'remove', 'path': '/spl_delete_prop'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) # Attempt to replace properties path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,spl_role'}) data = jsonutils.dumps([ # Updating an empty property to verify bug #1332103. {'op': 'replace', 'path': '/spl_update_prop', 'value': ''}, {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_update_prop' has update permission for spl_role # hence the value has changed self.assertEqual('u', image['spl_update_prop']) # Attempt to remove properties path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,spl_role'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/spl_delete_prop'}, # Deleting an empty property to verify bug #1332103. 
{'op': 'remove', 'path': '/spl_delete_empty_prop'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_delete_prop' and 'spl_delete_empty_prop' have delete # permission for spl_role hence the properties have been deleted self.assertNotIn('spl_delete_prop', image.keys()) self.assertNotIn('spl_delete_empty_prop', image.keys()) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_property_protections_with_policies(self): # Enable property protection rules = { "glance_creator": "role:admin or role:spl_role" } self.set_policy_rules(rules) self.api_server.property_protection_file = self.property_file_policies self.api_server.property_protection_rule_format = 'policies' self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image for role member with extra props # Raises 403 since user is not allowed to set 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'x_owner_foo': 'o_s_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Create an image for role member without 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Create an image for role spl_role with extra props path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,spl_role,admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'spl_creator_policy': 'creator_bar', 'spl_default_policy': 'default_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('creator_bar', image['spl_creator_policy']) self.assertEqual('default_bar', image['spl_default_policy']) # Attempt to replace a property which is permitted path = self._url('/v2/images/%s' % image_id) media_type = 
'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ # Updating an empty property to verify bug #1332103. {'op': 'replace', 'path': '/spl_creator_policy', 'value': ''}, {'op': 'replace', 'path': '/spl_creator_policy', 'value': 'r'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_creator_policy' has update permission for admin # hence the value has changed self.assertEqual('r', image['spl_creator_policy']) # Attempt to replace a property which is forbidden path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,spl_role'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/spl_creator_policy', 'value': 'z'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) # Attempt to read properties path = self._url('/v2/images/%s' % image_id) headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,random_role'}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) # 'random_role' is allowed to read 'spl_default_policy'. self.assertEqual('default_bar', image['spl_default_policy']) # 'random_role' is forbidden to read 'spl_creator_policy'. self.assertNotIn('spl_creator_policy', image) # Attempt to replace and remove properties which are permitted path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ # Deleting an empty property to verify bug #1332103. 
{'op': 'replace', 'path': '/spl_creator_policy', 'value': ''}, {'op': 'remove', 'path': '/spl_creator_policy'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_creator_policy' has delete permission for admin # hence the value has been deleted self.assertNotIn('spl_creator_policy', image) # Attempt to read a property that is permitted path = self._url('/v2/images/%s' % image_id) headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,random_role'}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertEqual('default_bar', image['spl_default_policy']) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # This image should no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) self.stop_servers() def test_property_protections_special_chars_roles(self): # Enable property protection self.api_server.property_protection_file = self.property_file_roles self.start_servers(**self.__dict__.copy()) # Verify both admin and unknown role can create properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_admin': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_joe_soap': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Verify both admin and unknown role can read properties marked with # '@' headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', 
image['x_all_permitted_joe_soap']) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can update properties marked with # '@' path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('2', image['x_all_permitted_joe_soap']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('3', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can delete properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_a': '1', 'x_all_permitted_b': '2' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_a'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_a', image.keys()) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_b'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_b', image.keys()) # Verify neither admin nor unknown role can create a property protected # with '!' 
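# In the roles rule format, '@' and '!' are special values: '@' permits
# the operation for every role and '!' permits it for none. An
# illustrative sketch of such entries (assumed; the actual fixture is
# self.property_file_roles):
#
#   [^x_all_permitted.*]
#   create = @
#   read = @
#   update = @
#   delete = @
#
#   [^x_none_permitted.*]
#   create = !
#   read = !
#   update = !
#   delete = !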
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Verify neither admin nor unknown role can read properties marked with # '!' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_read': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) # Verify neither admin nor unknown role can update properties marked # with '!' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_update': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('1', image['x_none_update']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) # FIXME(danms): This was expecting CONFLICT, but ... should it # not be the same as the admin case above? self.assertEqual(http.FORBIDDEN, response.status_code, response.text) # Verify neither admin nor unknown role can delete properties marked # with '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_delete': '1', }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) # FIXME(danms): This was expecting CONFLICT, but ... should it # not be the same as the admin case above? self.assertEqual(http.FORBIDDEN, response.status_code, response.text) self.stop_servers() def test_property_protections_special_chars_policies(self): # Enable property protection self.api_server.property_protection_file = self.property_file_policies self.api_server.property_protection_rule_format = 'policies' self.start_servers(**self.__dict__.copy()) # Verify both admin and unknown role can create properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_admin': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_joe_soap': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Verify both admin and unknown role can read properties marked with # '@' headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = 
jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can update properties marked with # '@' path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('2', image['x_all_permitted_joe_soap']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('3', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can delete properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_a': '1', 'x_all_permitted_b': '2' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_a'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_a', image.keys()) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_b'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_b', image.keys()) # Verify neither admin nor unknown role can create a property protected # with '!' 
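# The policies rule format accepts the same special values: each CRUD
# operation maps either to a policy rule name (e.g. the 'glance_creator'
# rule exercised earlier) or to '@' / '!', which unconditionally allow
# or deny the operation. An illustrative sketch (assumed; the actual
# fixture is self.property_file_policies):
#
#   [spl_creator_policy]
#   create = glance_creator
#   read = glance_creator
#   update = glance_creator
#   delete = glance_creator
#
#   [^x_none_permitted.*]
#   create = !
#   read = !
#   update = !
#   delete = !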
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Verify neither admin nor unknown role can read properties marked with # '!' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_read': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'reader,member,joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) # Verify neither admin nor unknown role can update properties marked # with '!' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_update': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('1', image['x_none_update']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code, response.text) # Verify neither admin nor unknown role can delete properties marked # with '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_delete': '1', }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.FORBIDDEN, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'reader,member,joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code, response.text) self.stop_servers() def test_tag_lifecycle(self): self.start_servers(**self.__dict__.copy()) # Create an image with a tag - duplicate should be ignored path = self._url('/v2/images') headers = self._headers({'Content-Type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'tags': ['sniff', 'sniff']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image_id = jsonutils.loads(response.text)['id'] # Image should show a list with a single tag path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff'], tags) # Delete all tags for tag in tags: path = self._url('/v2/images/%s/tags/%s' % (image_id, tag)) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Update image with too many tags via PUT # Configured limit is 10 tags for i in range(10): path = self._url('/v2/images/%s/tags/foo%i' % (image_id, i)) response = requests.put(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # 11th tag should fail path = self._url('/v2/images/%s/tags/fail_me' % image_id) response = requests.put(path, headers=self._headers()) self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) # Make sure the 11th tag was not added path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(10, len(tags)) # Update image tags via PATCH path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [ { 'op': 'replace', 'path': '/tags', 'value': ['foo'], }, ] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) # Update image with too many tags via PATCH # Configured limit is 10 tags path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) tags = ['foo%d' % i for i in range(11)] doc = [ { 
'op': 'replace', 'path': '/tags', 'value': tags, }, ] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) # Tags should not have changed since request was over limit path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['foo'], tags) # Update image with duplicate tag - it should be ignored path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [ { 'op': 'replace', 'path': '/tags', 'value': ['sniff', 'snozz', 'snozz'], }, ] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) # Image should show the appropriate tags path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) # An attempt to tag the image with a duplicate should be ignored path = self._url('/v2/images/%s/tags/snozz' % image_id) response = requests.put(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Create another more complex tag path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) response = requests.put(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Double-check that the tags container on the image is populated path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['gabe@example.com', 'sniff', 'snozz'], sorted(tags)) # Query images by single tag path = self._url('/v2/images?tag=sniff') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual('image-1', images[0]['name']) # Query images by multiple tags path = self._url('/v2/images?tag=sniff&tag=snozz') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual('image-1', images[0]['name']) # Query images by tag and other attributes path = self._url('/v2/images?tag=sniff&status=queued') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual('image-1', images[0]['name']) # Query images by tag and a nonexistent tag path = self._url('/v2/images?tag=sniff&tag=fake') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # The tag should be deletable path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # List of tags 
should reflect the deletion path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) # Deleting the same tag should return a 404 path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # The deleted tag should no longer match any images when filtering path = self._url('/v2/images?tag=gabe%40example.com') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Try to add a tag that is too long big_tag = 'a' * 300 path = self._url('/v2/images/%s/tags/%s' % (image_id, big_tag)) response = requests.put(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) # Tags should not have changed since request was over limit path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) self.stop_servers() def test_images_container(self): # Image list should be empty and no next link should be present self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] first = jsonutils.loads(response.text)['first'] self.assertEqual(0, len(images)) self.assertNotIn('next', jsonutils.loads(response.text)) self.assertEqual('/v2/images', first) # Create 7 images images = [] fixtures = [ {'name': 'image-3', 'type': 'kernel', 'ping': 'pong', 'container_format': 'ami', 'disk_format': 'ami'}, {'name': 'image-4', 'type': 'kernel', 'ping': 'pong', 'container_format': 'bare', 'disk_format': 'ami'}, {'name': 'image-1', 'type': 'kernel', 'ping': 'pong'}, {'name': 'image-3', 'type': 'ramdisk', 'ping': 'pong'}, {'name': 'image-2', 'type': 'kernel', 'ping': 'ding'}, {'name': 'image-3', 'type': 'kernel', 'ping': 'pong'}, {'name': 'image-2,image-5', 'type': 'kernel', 'ping': 'pong'}, ] path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) for fixture in fixtures: data = jsonutils.dumps(fixture) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) images.append(jsonutils.loads(response.text)) # Image list should contain 7 images path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(7, len(body['images'])) self.assertEqual('/v2/images', body['first']) self.assertNotIn('next', jsonutils.loads(response.text)) # Image list filters by created_at time url_template = '/v2/images?created_at=lt:%s' path = self._url(url_template % images[0]['created_at']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['images'])) self.assertEqual(url_template % images[0]['created_at'], urllib.parse.unquote(body['first'])) # Image list filters by updated_at time url_template = 
'/v2/images?updated_at=lt:%s' path = self._url(url_template % images[2]['updated_at']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(3, len(body['images'])) self.assertEqual(url_template % images[2]['updated_at'], urllib.parse.unquote(body['first'])) # Image list filters by updated_at and created_at time with invalid value url_template = '/v2/images?%s=lt:invalid_value' for filter in ['updated_at', 'created_at']: path = self._url(url_template % filter) response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) # Image list filters by updated_at and created_at with invalid operator url_template = '/v2/images?%s=invalid_operator:2015-11-19T12:24:02Z' for filter in ['updated_at', 'created_at']: path = self._url(url_template % filter) response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) # Image list filters by a value with invalid URL encoding path = self._url('/v2/images?name=%FF') response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) # Image list filters by name with in operator url_template = '/v2/images?name=in:%s' filter_value = 'image-1,image-2' path = self._url(url_template % filter_value) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(3, len(body['images'])) # Image list filters by container_format with in operator url_template = '/v2/images?container_format=in:%s' filter_value = 'bare,ami' path = self._url(url_template % filter_value) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(2, len(body['images'])) # Image list filters by disk_format with in operator url_template = '/v2/images?disk_format=in:%s' filter_value = 'bare,ami,iso' path = self._url(url_template % filter_value) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(2, len(body['images'])) # Begin pagination after the first image template_url = ('/v2/images?limit=2&sort_dir=asc&sort_key=name' '&marker=%s&type=kernel&ping=pong') path = self._url(template_url % images[2]['id']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(2, len(body['images'])) response_ids = [image['id'] for image in body['images']] self.assertEqual([images[6]['id'], images[0]['id']], response_ids) # Continue pagination using next link from previous request path = self._url(body['next']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(2, len(body['images'])) response_ids = [image['id'] for image in body['images']] self.assertEqual([images[5]['id'], images[1]['id']], response_ids) # Continue pagination - expect no results path = self._url(body['next']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['images'])) # Delete first image path = self._url('/v2/images/%s' % images[0]['id']) response = 
requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Ensure bad request for using a deleted image as marker path = self._url('/v2/images?marker=%s' % images[0]['id']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.BAD_REQUEST, response.status_code) self.stop_servers() def test_image_visibility_to_different_users(self): self.cleanup() self.api_server.deployment_flavor = 'fakeauth' kwargs = self.__dict__.copy() self.start_servers(**kwargs) owners = ['admin', 'tenant1', 'tenant2', 'none'] visibilities = ['public', 'private', 'shared', 'community'] for owner in owners: for visibility in visibilities: path = self._url('/v2/images') headers = self._headers({ 'content-type': 'application/json', 'X-Auth-Token': 'createuser:%s:admin' % owner, }) data = jsonutils.dumps({ 'name': '%s-%s' % (owner, visibility), 'visibility': visibility, }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) def list_images(tenant, role='', visibility=None): auth_token = 'user:%s:%s' % (tenant, role) headers = {'X-Auth-Token': auth_token} path = self._url('/v2/images') if visibility is not None: path += '?visibility=%s' % visibility response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) return jsonutils.loads(response.text)['images'] # 1. Known user sees public and their own images images = list_images('tenant1', role='reader') self.assertEqual(7, len(images)) for image in images: self.assertTrue(image['visibility'] == 'public' or 'tenant1' in image['name']) # 2. Known user, visibility=public, sees all public images images = list_images('tenant1', role='reader', visibility='public') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 3. Known user, visibility=private, sees only their private image images = list_images('tenant1', role='reader', visibility='private') self.assertEqual(1, len(images)) image = images[0] self.assertEqual('private', image['visibility']) self.assertIn('tenant1', image['name']) # 4. Known user, visibility=shared, sees only their shared image images = list_images('tenant1', role='reader', visibility='shared') self.assertEqual(1, len(images)) image = images[0] self.assertEqual('shared', image['visibility']) self.assertIn('tenant1', image['name']) # 5. Known user, visibility=community, sees all community images images = list_images('tenant1', role='reader', visibility='community') self.assertEqual(4, len(images)) for image in images: self.assertEqual('community', image['visibility']) # 6. Unknown user sees only public images images = list_images('none', role='reader') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 7. Unknown user, visibility=public, sees only public images images = list_images('none', role='reader', visibility='public') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 8. Unknown user, visibility=private, sees no images images = list_images('none', role='reader', visibility='private') self.assertEqual(0, len(images)) # 9. Unknown user, visibility=shared, sees no images images = list_images('none', role='reader', visibility='shared') self.assertEqual(0, len(images)) # 10. 
        # 10. Unknown user, visibility=community, sees only community images
        images = list_images('none', role='reader', visibility='community')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('community', image['visibility'])

        # 11. Unknown admin sees all images except for community images
        images = list_images('none', role='admin')
        self.assertEqual(12, len(images))

        # 12. Unknown admin, visibility=public, sees only public images
        images = list_images('none', role='admin', visibility='public')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('public', image['visibility'])

        # 13. Unknown admin, visibility=private, sees only private images
        images = list_images('none', role='admin', visibility='private')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('private', image['visibility'])

        # 14. Unknown admin, visibility=shared, sees only shared images
        images = list_images('none', role='admin', visibility='shared')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('shared', image['visibility'])

        # 15. Unknown admin, visibility=community, sees only community images
        images = list_images('none', role='admin', visibility='community')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('community', image['visibility'])

        # 16. Known admin sees all images, except community images owned by
        # others
        images = list_images('admin', role='admin')
        self.assertEqual(13, len(images))

        # 17. Known admin, visibility=public, sees all public images
        images = list_images('admin', role='admin', visibility='public')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('public', image['visibility'])

        # 18. Known admin, visibility=private, sees all private images
        images = list_images('admin', role='admin', visibility='private')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('private', image['visibility'])

        # 19. Known admin, visibility=shared, sees all shared images
        images = list_images('admin', role='admin', visibility='shared')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('shared', image['visibility'])
        # 20. Known admin, visibility=community, sees all community images
        images = list_images('admin', role='admin', visibility='community')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertEqual('community', image['visibility'])

        self.stop_servers()

    def test_update_locations(self):
        self.api_server.show_multiple_locations = True
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])
        self.assertIsNone(image['size'])
        self.assertIsNone(image['virtual_size'])

        # Update locations for the queued image
        path = self._url('/v2/images/%s' % image_id)
        media_type = 'application/openstack-images-v2.1-json-patch'
        headers = self._headers({'content-type': media_type})
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        data = jsonutils.dumps([{'op': 'replace', 'path': '/locations',
                                 'value': [{'url': url, 'metadata': {}}]}])
        response = requests.patch(path, headers=headers, data=data)
        self.assertEqual(http.OK, response.status_code, response.text)

        # The image size should be updated
        path = self._url('/v2/images/%s' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertEqual(10, image['size'])

    def test_update_locations_with_restricted_sources(self):
        self.api_server.show_multiple_locations = True
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])
        self.assertIsNone(image['size'])
        self.assertIsNone(image['virtual_size'])

        # Update locations for the queued image
        path = self._url('/v2/images/%s' % image_id)
        media_type = 'application/openstack-images-v2.1-json-patch'
        headers = self._headers({'content-type': media_type})
        data = jsonutils.dumps([{'op': 'replace', 'path': '/locations',
                                 'value': [{'url': 'file:///foo_image',
                                            'metadata': {}}]}])
        response = requests.patch(path, headers=headers, data=data)
        self.assertEqual(http.BAD_REQUEST, response.status_code,
                         response.text)

        data = jsonutils.dumps([{'op': 'replace', 'path': '/locations',
                                 'value': [{'url':
                                            'swift+config:///foo_image',
                                            'metadata': {}}]}])
        response = requests.patch(path, headers=headers, data=data)
        self.assertEqual(http.BAD_REQUEST, response.status_code,
                         response.text)

    def test_add_location_with_do_secure_hash_true_negative(self):
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)
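        # NOTE: everything below exercises negative paths of the
        # add-location API; each request is expected to be rejected with
        # an error status before any hash calculation takes place.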
        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])
        self.assertIsNone(image['size'])

        # Add location with non-image-owner tenant
        path = self._url('/v2/images/%s/locations' % image_id)
        headers = self._headers({'X-Tenant-Id': TENANT2})
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        data = jsonutils.dumps({'url': url})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.NOT_FOUND, response.status_code, response.text)

        # Add location with invalid validation_data
        # (invalid os_hash_value)
        validation_data = {
            'os_hash_algo': "sha512",
            'os_hash_value': "dbc9e0f80d131e64b94913a7b40bb5"
        }
        headers = self._headers({'X-Tenant-Id': TENANT1})
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.BAD_REQUEST, response.status_code,
                         response.text)

        # Add location with invalid validation_data (without os_hash_algo)
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        with requests.get(url) as r:
            expect_h = str(hashlib.sha512(r.content).hexdigest())
        validation_data = {'os_hash_value': expect_h}
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.BAD_REQUEST, response.status_code,
                         response.text)

        # Add location with invalid validation_data
        # (invalid os_hash_algo)
        validation_data = {
            'os_hash_algo': 'sha123',
            'os_hash_value': expect_h}
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.BAD_REQUEST, response.status_code,
                         response.text)

        # Add location with invalid validation_data
        # (os_hash_value does not match os_hash_algo)
        with requests.get(url) as r:
            expect_h = str(hashlib.sha256(r.content).hexdigest())
        validation_data = {
            'os_hash_algo': 'sha512',
            'os_hash_value': expect_h}
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.BAD_REQUEST, response.status_code,
                         response.text)

        self.stop_servers()

    def test_add_location_with_do_secure_hash_true(self):
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])

        # Add location with os_hash_algo other than sha512
        path = self._url('/v2/images/%s/locations' % image_id)
        headers = self._headers({'X-Tenant-Id': TENANT1})
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        with requests.get(url) as r:
            expect_c = str(md5(r.content, usedforsecurity=False).hexdigest())
            expect_h = str(hashlib.sha256(r.content).hexdigest())
        validation_data = {
            'os_hash_algo': 'sha256',
            'os_hash_value': expect_h}
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code, response.text)

        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_image_checksum_and_status(self, image_id,
                                                      status='active',
                                                      max_sec=10,
                                                      delay_sec=0.2,
                                                      start_delay_sec=1)
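        # NOTE: with do_secure_hash enabled the checksum and os_hash_value
        # are calculated asynchronously, hence the wait helper above rather
        # than asserting on the POST response directly.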
        # Show Image
        path = self._url('/v2/images/%s' % image_id)
        resp = requests.get(path, headers=headers)
        image = jsonutils.loads(resp.text)
        self.assertEqual(expect_c, image['checksum'])
        self.assertEqual(expect_h, image['os_hash_value'])

        # Add location with valid validation_data
        # (os_hash_algo value sha512)
        # Create an image 2
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])

        path = self._url('/v2/images/%s/locations' % image_id)
        headers = self._headers({'X-Tenant-Id': TENANT1})
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        with requests.get(url) as r:
            expect_c = str(md5(r.content, usedforsecurity=False).hexdigest())
            expect_h = str(hashlib.sha512(r.content).hexdigest())
        validation_data = {
            'os_hash_algo': 'sha512',
            'os_hash_value': expect_h}
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code, response.text)

        # Show Image
        path = self._url('/v2/images/%s' % image_id)
        resp = requests.get(path, headers=self._headers())
        output = jsonutils.loads(resp.text)
        self.assertEqual('queued', output['status'])
        func_utils.wait_for_image_checksum_and_status(self, image_id,
                                                      status='active',
                                                      max_sec=10,
                                                      delay_sec=0.2,
                                                      start_delay_sec=1)

        # Show Image
        resp = requests.get(path, headers=self._headers())
        image = jsonutils.loads(resp.text)
        self.assertEqual(expect_c, image['checksum'])
        self.assertEqual(expect_h, image['os_hash_value'])

        # Add location with valid URL and do_secure_hash = True
        # without validation_data
        # Create an image 3
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])
        self.assertIsNone(image['size'])
        self.assertIsNone(image['virtual_size'])

        path = self._url('/v2/images/%s/locations' % image_id)
        headers = self._headers({'X-Tenant-Id': TENANT1})
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        with requests.get(url) as r:
            expect_c = str(md5(r.content, usedforsecurity=False).hexdigest())
            expect_h = str(hashlib.sha512(r.content).hexdigest())
        data = jsonutils.dumps({'url': url})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code, response.text)

        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_image_checksum_and_status(self, image_id,
                                                      status='active',
                                                      max_sec=10,
                                                      delay_sec=0.2,
                                                      start_delay_sec=1)

        # Show Image
        path = self._url('/v2/images/%s' % image_id)
        resp = requests.get(path, headers=headers)
        image = jsonutils.loads(resp.text)
        self.assertEqual(expect_c, image['checksum'])
        self.assertEqual(expect_h, image['os_hash_value'])

        self.stop_servers()
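    # NOTE: the test below presumably relies on do_secure_hash = False
    # meaning no hash calculation happens on add-location, so only the
    # image status transition is asserted, not checksum values.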
    def test_add_location_with_do_secure_hash_false(self):
        self.api_server.do_secure_hash = False
        self.start_servers(**self.__dict__.copy())

        # Add location with valid URL and do_secure_hash = False
        # with validation_data
        # Create an image 1
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])

        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        with requests.get(url) as r:
            expect_h = str(hashlib.sha512(r.content).hexdigest())
        validation_data = {
            'os_hash_algo': 'sha512',
            'os_hash_value': expect_h}
        path = self._url('/v2/images/%s/locations' % image_id)
        headers = self._headers({'X-Tenant-Id': TENANT1})
        data = jsonutils.dumps({'url': url,
                                'validation_data': validation_data})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code, response.text)

        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_status(self, request_path=path,
                                   request_headers=headers,
                                   status='active',
                                   max_sec=2,
                                   delay_sec=0.2,
                                   start_delay_sec=1)

        # Add location with valid URL and do_secure_hash = False
        # without validation_data
        # Create an image 2
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])
        self.assertIsNone(image['size'])
        self.assertIsNone(image['virtual_size'])

        url = 'http://127.0.0.1:%s/foo_image' % self.http_port0
        path = self._url('/v2/images/%s/locations' % image_id)
        headers = self._headers({'X-Tenant-Id': TENANT1})
        data = jsonutils.dumps({'url': url})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code, response.text)

        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_status(self, request_path=path,
                                   request_headers=headers,
                                   status='active',
                                   max_sec=2,
                                   delay_sec=0.2,
                                   start_delay_sec=1)

        self.stop_servers()

    def test_get_location(self):
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])

        # Get locations of `queued` image
        headers = self._headers({'X-Roles': 'service'})
        path = self._url('/v2/images/%s/locations' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code, response.text)
        self.assertEqual(0, len(jsonutils.loads(response.text)))
        self.assertEqual('queued', image['status'])

        # Get location of invalid image
        image_id = str(uuid.uuid4())
        path = self._url('/v2/images/%s/locations' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(http.NOT_FOUND, response.status_code, response.text)
        # Add location with valid URL and image owner
        image_id = image['id']
        path = self._url('/v2/images/%s/locations' % image_id)
        url = 'http://127.0.0.1:%s/foo_image' % self.http_port1
        data = jsonutils.dumps({'url': url})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(202, response.status_code, response.text)

        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'content-type': 'application/json'})
        func_utils.wait_for_status(self, request_path=path,
                                   request_headers=headers,
                                   status='active',
                                   max_sec=10,
                                   delay_sec=0.2,
                                   start_delay_sec=1)

        # Get locations is not allowed for any other user
        headers = self._headers({'X-Roles': 'admin,member'})
        path = self._url('/v2/images/%s/locations' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(http.FORBIDDEN, response.status_code, response.text)

        # Get locations is allowed only for the service user
        headers = self._headers({'X-Roles': 'service'})
        path = self._url('/v2/images/%s/locations' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code, response.text)

        self.stop_servers()

    def test_get_location_with_data_upload(self):
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        self.assertEqual('queued', image['status'])

        # Upload some image data
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        image_data = b'ZZZZZ'
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        expect_c = str(md5(image_data, usedforsecurity=False).hexdigest())
        expect_h = str(hashlib.sha512(image_data).hexdigest())
        func_utils.verify_image_hashes_and_status(self, image_id, expect_c,
                                                  expect_h, 'active',
                                                  size=len(image_data))

        # Get locations is not allowed for any other user
        headers = self._headers({'X-Roles': 'admin,member'})
        path = self._url('/v2/images/%s/locations' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(http.FORBIDDEN, response.status_code, response.text)

        # Get locations is allowed only for the service user
        headers = self._headers({'X-Roles': 'service'})
        path = self._url('/v2/images/%s/locations' % image_id)
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code, response.text)
        output = jsonutils.loads(response.text)
        self.assertTrue(output[0]['url'])

        self.stop_servers()


class TestImagesIPv6(functional.FunctionalTest):
    """Verify that API and REG servers running IPv6 can communicate"""

    def setUp(self):
        """
        First apply monkey patches of functions and methods which have
        IPv4 hardcoded.
        """
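        # NOTE: the port helpers are swapped at module level, so tearDown
        # below must restore the IPv4 originals or later tests would keep
        # using the IPv6 variants.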
""" # Setting up initial monkey patch (1) test_utils.get_unused_port_ipv4 = test_utils.get_unused_port test_utils.get_unused_port_and_socket_ipv4 = ( test_utils.get_unused_port_and_socket) test_utils.get_unused_port = test_utils.get_unused_port_ipv6 test_utils.get_unused_port_and_socket = ( test_utils.get_unused_port_and_socket_ipv6) super(TestImagesIPv6, self).setUp() self.cleanup() # Setting up monkey patch (2), after object is ready... self.ping_server_ipv4 = self.ping_server self.ping_server = self.ping_server_ipv6 self.include_scrubber = False def tearDown(self): # Cleaning up monkey patch (2). self.ping_server = self.ping_server_ipv4 super(TestImagesIPv6, self).tearDown() # Cleaning up monkey patch (1). test_utils.get_unused_port = test_utils.get_unused_port_ipv4 test_utils.get_unused_port_and_socket = ( test_utils.get_unused_port_and_socket_ipv4) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def test_image_list_ipv6(self): # Image list should be empty self.api_server.deployment_flavor = "caching" self.start_servers(**self.__dict__.copy()) url = f'http://[::1]:{self.api_port}' path = '/' requests.get(url + path, headers=self._headers()) path = '/v2/images' response = requests.get(url + path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) class TestImageDirectURLVisibility(functional.FunctionalTest): def setUp(self): super(TestImageDirectURLVisibility, self).setUp() self.cleanup() self.include_scrubber = False self.api_server.deployment_flavor = 'noauth' def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def test_image_direct_url_visible(self): self.api_server.show_image_direct_url = True self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'visibility': 'public'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Get the image id image = jsonutils.loads(response.text) image_id = image['id'] # Image direct_url should not be visible before location is set path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('direct_url', image) # Upload some image data, setting the image location path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = 
        # Image direct_url should be visible
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertIn('direct_url', image)

        # Image direct_url should be visible to non-owner, non-admin user
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json',
                                 'X-Tenant-Id': TENANT2})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertIn('direct_url', image)

        # Image direct_url should be visible in a list
        path = self._url('/v2/images')
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)['images'][0]
        self.assertIn('direct_url', image)

        self.stop_servers()

    def test_image_multiple_location_url_visible(self):
        self.api_server.show_multiple_locations = True
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel',
                                'foo': 'bar', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Get the image id
        image = jsonutils.loads(response.text)
        image_id = image['id']

        # Image locations should not be visible before location is set
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertIn('locations', image)
        self.assertEqual([], image["locations"])

        # Upload some image data, setting the image location
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data='ZZZZZ')
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Image locations should be visible
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertIn('locations', image)
        loc = image['locations']
        self.assertGreater(len(loc), 0)
        loc = loc[0]
        self.assertIn('url', loc)
        self.assertIn('metadata', loc)

        self.stop_servers()

    def test_image_direct_url_not_visible(self):
        self.api_server.show_image_direct_url = False
        self.start_servers(**self.__dict__.copy())

        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel',
                                'foo': 'bar', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)
        # Get the image id
        image = jsonutils.loads(response.text)
        image_id = image['id']

        # Upload some image data, setting the image location
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data='ZZZZZ')
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Image direct_url should not be visible
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertNotIn('direct_url', image)

        # Image direct_url should not be visible in a list
        path = self._url('/v2/images')
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)['images'][0]
        self.assertNotIn('direct_url', image)

        self.stop_servers()


class TestImageLocationSelectionStrategy(functional.FunctionalTest):

    def setUp(self):
        super(TestImageLocationSelectionStrategy, self).setUp()
        self.cleanup()
        self.include_scrubber = False
        self.api_server.deployment_flavor = 'noauth'
        for i in range(3):
            ret = test_utils.start_http_server("foo_image_id%d" % i,
                                               "foo_image%d" % i)
            setattr(self, 'http_server%d' % i, ret[1])
            setattr(self, 'http_port%d' % i, ret[2])

    def tearDown(self):
        for i in range(3):
            httpd = getattr(self, 'http_server%d' % i, None)
            if httpd:
                httpd.shutdown()
                httpd.server_close()
        super(TestImageLocationSelectionStrategy, self).tearDown()

    def _headers(self, custom_headers=None):
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': TENANT1,
            'X-Roles': 'reader,member',
        }
        base_headers.update(custom_headers or {})
        return base_headers

    def test_image_locations_with_order_strategy(self):
        self.api_server.show_image_direct_url = True
        self.api_server.show_multiple_locations = True
        self.image_location_quota = 10
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel',
                                'foo': 'bar', 'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Get the image id
        image = jsonutils.loads(response.text)
        image_id = image['id']

        # Image locations should not be visible before location is set
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertIn('locations', image)
        self.assertEqual([], image["locations"])

        # Update image locations via PATCH
        path = self._url('/v2/images/%s' % image_id)
        media_type = 'application/openstack-images-v2.1-json-patch'
        headers = self._headers({'content-type': media_type})
        values = [{'url': 'http://127.0.0.1:%s/foo_image' % self.http_port0,
                   'metadata': {}},
                  {'url': 'http://127.0.0.1:%s/foo_image' % self.http_port1,
                   'metadata': {}}]
        doc = [{'op': 'replace', 'path': '/locations', 'value': values}]
        data = jsonutils.dumps(doc)
        response = requests.patch(path, headers=headers, data=data)
        self.assertEqual(http.OK, response.status_code)
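        # NOTE: with the 'order' strategy the first entry of the patched
        # locations list is expected to double as direct_url; the
        # assertions below rely on that ordering.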
        # Image locations should be visible
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(http.OK, response.status_code)
        image = jsonutils.loads(response.text)
        self.assertIn('locations', image)
        self.assertEqual(values, image['locations'])
        self.assertIn('direct_url', image)
        self.assertEqual(values[0]['url'], image['direct_url'])

        self.stop_servers()


class TestImageMembers(functional.FunctionalTest):

    def setUp(self):
        super(TestImageMembers, self).setUp()
        self.cleanup()
        self.include_scrubber = False
        self.api_server.deployment_flavor = 'fakeauth'
        self.start_servers(**self.__dict__.copy())

    def _headers(self, custom_headers=None):
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': TENANT1,
            'X-Roles': 'reader,member',
        }
        base_headers.update(custom_headers or {})
        return base_headers

    def test_image_member_lifecycle(self):
        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        owners = ['tenant1', 'tenant2', 'admin']
        visibilities = ['community', 'private', 'public', 'shared']
        image_fixture = []
        for owner in owners:
            for visibility in visibilities:
                path = self._url('/v2/images')
                role = 'member'
                if visibility == 'public':
                    role = 'admin'
                headers = self._headers({
                    'content-type': 'application/json',
                    'X-Auth-Token': 'createuser:%s:admin' % owner,
                    'X-Roles': role,
                })
                data = jsonutils.dumps({
                    'name': '%s-%s' % (owner, visibility),
                    'visibility': visibility,
                })
                response = requests.post(path, headers=headers, data=data)
                self.assertEqual(http.CREATED, response.status_code)
                image_fixture.append(jsonutils.loads(response.text))

        # Image list should contain 6 images for tenant1
        path = self._url('/v2/images')
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(6, len(images))

        # Image list should contain 3 images for TENANT3
        path = self._url('/v2/images')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(3, len(images))

        # Add image member for tenant1-shared image
        path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
        body = jsonutils.dumps({'member': TENANT3})
        response = requests.post(path, headers=get_auth_header('tenant1'),
                                 data=body)
        self.assertEqual(http.OK, response.status_code)
        image_member = jsonutils.loads(response.text)
        self.assertEqual(image_fixture[3]['id'], image_member['image_id'])
        self.assertEqual(TENANT3, image_member['member_id'])
        self.assertIn('created_at', image_member)
        self.assertIn('updated_at', image_member)
        self.assertEqual('pending', image_member['status'])

        # Image list should contain 3 images for TENANT3
        path = self._url('/v2/images')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(3, len(images))

        # Image list should contain 0 shared images for TENANT3
        # because the default member_status is 'accepted'
        path = self._url('/v2/images?visibility=shared')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))
        # Image list should contain 4 images for TENANT3 with status pending
        path = self._url('/v2/images?member_status=pending')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(4, len(images))

        # Image list should contain 4 images for TENANT3 with status all
        path = self._url('/v2/images?member_status=all')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(4, len(images))

        # Image list should contain 1 image for TENANT3 with status pending
        # and visibility shared
        path = self._url('/v2/images?member_status=pending&visibility=shared')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(images[0]['name'], 'tenant1-shared')

        # Image list should contain 0 images for TENANT3 with status rejected
        # and visibility shared
        path = self._url('/v2/images?member_status=rejected'
                         '&visibility=shared')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Image list should contain 0 images for TENANT3 with status accepted
        # and visibility shared
        path = self._url('/v2/images?member_status=accepted'
                         '&visibility=shared')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Image list should contain 0 images for TENANT3 with status accepted
        # and visibility private
        path = self._url('/v2/images?visibility=private')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Image tenant2-shared's image members list should contain no members
        path = self._url('/v2/images/%s/members' % image_fixture[7]['id'])
        response = requests.get(path, headers=get_auth_header('tenant2'))
        self.assertEqual(http.OK, response.status_code)
        body = jsonutils.loads(response.text)
        self.assertEqual(0, len(body['members']))

        # Tenant 1, who is the owner, cannot change the status of the image
        # member
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        body = jsonutils.dumps({'status': 'accepted'})
        response = requests.put(path, headers=get_auth_header('tenant1'),
                                data=body)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Tenant 1, who is the owner, can get the status of its own image
        # member
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.OK, response.status_code)
        body = jsonutils.loads(response.text)
        self.assertEqual('pending', body['status'])
        self.assertEqual(image_fixture[3]['id'], body['image_id'])
        self.assertEqual(TENANT3, body['member_id'])

        # Tenant 3, who is the member, can get its own member status
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        body = jsonutils.loads(response.text)
        self.assertEqual('pending', body['status'])
        self.assertEqual(image_fixture[3]['id'], body['image_id'])
        self.assertEqual(TENANT3, body['member_id'])
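        # NOTE: a new membership starts as 'pending' and only the member
        # (not the image owner) may move it to 'accepted' or 'rejected',
        # which the checks below exercise.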
        # Tenant 2, who is not the owner, cannot get the status of the image
        # member
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        response = requests.get(path, headers=get_auth_header('tenant2'))
        self.assertEqual(http.NOT_FOUND, response.status_code)

        # Tenant 3 can change the status of the image member
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        body = jsonutils.dumps({'status': 'accepted'})
        response = requests.put(path, headers=get_auth_header(TENANT3),
                                data=body)
        self.assertEqual(http.OK, response.status_code)
        image_member = jsonutils.loads(response.text)
        self.assertEqual(image_fixture[3]['id'], image_member['image_id'])
        self.assertEqual(TENANT3, image_member['member_id'])
        self.assertEqual('accepted', image_member['status'])

        # Image list should contain 4 images for TENANT3 because status is
        # accepted
        path = self._url('/v2/images')
        response = requests.get(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(4, len(images))

        # Tenant 3 cannot change the status to an invalid value
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        body = jsonutils.dumps({'status': 'invalid-status'})
        response = requests.put(path, headers=get_auth_header(TENANT3),
                                data=body)
        self.assertEqual(http.BAD_REQUEST, response.status_code)

        # Owner can upload data to staging image
        image_id = image_fixture[3]['id']
        path = self._url('/v2/images/%s/stage' % image_id)
        headers = get_auth_header('tenant1')
        headers.update({'Content-Type': 'application/octet-stream'})
        image_data = b'YYYYY'
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Tenant 3 can't upload data to tenant1-shared staging image
        path = self._url('/v2/images/%s/stage' % image_id)
        image_data = b'YYYYY'
        headers.update(get_auth_header(TENANT3))
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Owner cannot change the status of the image member
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        body = jsonutils.dumps({'status': 'accepted'})
        response = requests.put(path, headers=get_auth_header('tenant1'),
                                data=body)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Add image member for tenant2-shared image
        path = self._url('/v2/images/%s/members' % image_fixture[7]['id'])
        body = jsonutils.dumps({'member': TENANT4})
        response = requests.post(path, headers=get_auth_header('tenant2'),
                                 data=body)
        self.assertEqual(http.OK, response.status_code)
        image_member = jsonutils.loads(response.text)
        self.assertEqual(image_fixture[7]['id'], image_member['image_id'])
        self.assertEqual(TENANT4, image_member['member_id'])
        self.assertIn('created_at', image_member)
        self.assertIn('updated_at', image_member)

        # Adding an image member to a public image is forbidden
        path = self._url('/v2/images/%s/members' % image_fixture[2]['id'])
        body = jsonutils.dumps({'member': TENANT2})
        response = requests.post(path, headers=get_auth_header('tenant1'),
                                 data=body)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Adding an image member to a private image is forbidden
        path = self._url('/v2/images/%s/members' % image_fixture[1]['id'])
        body = jsonutils.dumps({'member': TENANT2})
        response = requests.post(path, headers=get_auth_header('tenant1'),
                                 data=body)
        self.assertEqual(http.FORBIDDEN, response.status_code)
        # Adding an image member to a community image is forbidden
        path = self._url('/v2/images/%s/members' % image_fixture[0]['id'])
        body = jsonutils.dumps({'member': TENANT2})
        response = requests.post(path, headers=get_auth_header('tenant1'),
                                 data=body)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Image tenant1-shared's members list should contain 1 member
        path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.OK, response.status_code)
        body = jsonutils.loads(response.text)
        self.assertEqual(1, len(body['members']))

        # Admin can see any members
        path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1',
                                                              role='admin'))
        self.assertEqual(http.OK, response.status_code)
        body = jsonutils.loads(response.text)
        self.assertEqual(1, len(body['members']))

        # Image members not found for private image not owned by TENANT 1
        path = self._url('/v2/images/%s/members' % image_fixture[7]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.NOT_FOUND, response.status_code)

        # Image members forbidden for public image
        path = self._url('/v2/images/%s/members' % image_fixture[2]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertIn("Only shared images have members", response.text)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Image members forbidden for community image
        path = self._url('/v2/images/%s/members' % image_fixture[0]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertIn("Only shared images have members", response.text)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Image members forbidden for private image
        path = self._url('/v2/images/%s/members' % image_fixture[1]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertIn("Only shared images have members", response.text)
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Image member cannot delete the image membership
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        response = requests.delete(path, headers=get_auth_header(TENANT3))
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Delete image member
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[3]['id'],
                                                       TENANT3))
        response = requests.delete(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Now the image has no members
        path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.OK, response.status_code)
        body = jsonutils.loads(response.text)
        self.assertEqual(0, len(body['members']))

        # Adding an 11th image member should fail since the configured limit
        # is 10
        path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
        for i in range(10):
            body = jsonutils.dumps({'member': str(uuid.uuid4())})
            response = requests.post(path, headers=get_auth_header('tenant1'),
                                     data=body)
            self.assertEqual(http.OK, response.status_code)

        body = jsonutils.dumps({'member': str(uuid.uuid4())})
        response = requests.post(path, headers=get_auth_header('tenant1'),
                                 data=body)
        self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code)
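        # NOTE: member records only exist for shared images, so the lookups
        # and deletes below against public, community and private images
        # are expected to fail.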
        # Get image member should return not found for public image
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[2]['id'],
                                                       TENANT3))
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.NOT_FOUND, response.status_code)

        # Get image member should return not found for community image
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'],
                                                       TENANT3))
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.NOT_FOUND, response.status_code)

        # Get image member should return not found for private image
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
                                                       TENANT3))
        response = requests.get(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.NOT_FOUND, response.status_code)

        # Delete image member should return forbidden for public image
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[2]['id'],
                                                       TENANT3))
        response = requests.delete(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Delete image member should return forbidden for community image
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'],
                                                       TENANT3))
        response = requests.delete(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.FORBIDDEN, response.status_code)

        # Delete image member should return forbidden for private image
        path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
                                                       TENANT3))
        response = requests.delete(path, headers=get_auth_header('tenant1'))
        self.assertEqual(http.FORBIDDEN, response.status_code)

        self.stop_servers()


class TestQuotas(functional.FunctionalTest):

    def setUp(self):
        super(TestQuotas, self).setUp()
        self.cleanup()
        self.include_scrubber = False
        self.api_server.deployment_flavor = 'noauth'
        self.user_storage_quota = 100
        self.start_servers(**self.__dict__.copy())

    def _headers(self, custom_headers=None):
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': TENANT1,
            'X-Roles': 'reader,member',
        }
        base_headers.update(custom_headers or {})
        return base_headers

    def _upload_image_test(self, data_src, expected_status):
        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Create an image (with a deployer-defined property)
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'testimg',
                                'type': 'kernel',
                                'foo': 'bar',
                                'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)
        image = jsonutils.loads(response.text)
        image_id = image['id']

        # Upload data
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data=data_src)
        self.assertEqual(expected_status, response.status_code)

        # Deletion should work
        path = self._url('/v2/images/%s' % image_id)
        response = requests.delete(path, headers=self._headers())
        self.assertEqual(http.NO_CONTENT, response.status_code)

    def test_image_upload_under_quota(self):
        data = b'x' * (self.user_storage_quota - 1)
        self._upload_image_test(data, http.NO_CONTENT)

    def test_image_upload_exceed_quota(self):
        data = b'x' * (self.user_storage_quota + 1)
        self._upload_image_test(data, http.REQUEST_ENTITY_TOO_LARGE)
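    # NOTE: the chunked variants below feed the data through a generator,
    # presumably so the request is streamed without a Content-Length and
    # quota enforcement has to happen while the data is being read.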
    def test_chunked_image_upload_under_quota(self):
        def data_gen():
            yield b'x' * (self.user_storage_quota - 1)

        self._upload_image_test(data_gen(), http.NO_CONTENT)

    def test_chunked_image_upload_exceed_quota(self):
        def data_gen():
            yield b'x' * (self.user_storage_quota + 1)

        self._upload_image_test(data_gen(), http.REQUEST_ENTITY_TOO_LARGE)


class TestImagesMultipleBackend(functional.MultipleBackendFunctionalTest):

    def setUp(self):
        super(TestImagesMultipleBackend, self).setUp()
        self.cleanup()
        self.include_scrubber = False
        self.api_server_multiple_backend.deployment_flavor = 'noauth'
        for i in range(3):
            ret = test_utils.start_http_server("foo_image_id%d" % i,
                                               "foo_image%d" % i)
            setattr(self, 'http_server%d' % i, ret[1])
            setattr(self, 'http_port%d' % i, ret[2])

    def tearDown(self):
        for i in range(3):
            httpd = getattr(self, 'http_server%d' % i, None)
            if httpd:
                httpd.shutdown()
                httpd.server_close()
        super(TestImagesMultipleBackend, self).tearDown()

    def _headers(self, custom_headers=None):
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': TENANT1,
            'X-Roles': 'reader,member',
        }
        base_headers.update(custom_headers or {})
        return base_headers

    def test_image_import_using_glance_direct(self):
        self.start_servers(**self.__dict__.copy())

        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # glance-direct should be available in discovery response
        path = self._url('/v2/info/import')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        discovery_calls = jsonutils.loads(
            response.text)['import-methods']['value']
        self.assertIn("glance-direct", discovery_calls)

        # file1, file2 and file3 should be available in discovery response
        available_stores = ['file1', 'file2', 'file3']
        path = self._url('/v2/info/stores')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        discovery_calls = jsonutils.loads(response.text)['stores']

        # os_glance_staging_store should not be available in discovery
        # response
        for stores in discovery_calls:
            self.assertIn('id', stores)
            self.assertIn(stores['id'], available_stores)
            self.assertFalse(stores["id"].startswith("os_glance_"))

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel',
                                'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Check 'OpenStack-image-store-ids' header present in response
        self.assertIn('OpenStack-image-store-ids', response.headers)
        for store in available_stores:
            self.assertIn(store,
                          response.headers['OpenStack-image-store-ids'])

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        checked_keys = set([
            'status', 'name', 'tags', 'created_at', 'updated_at',
            'visibility', 'self', 'protected', 'id', 'file', 'min_disk',
            'type', 'min_ram', 'schema', 'disk_format', 'container_format',
            'owner', 'checksum', 'size', 'virtual_size', 'os_hidden',
            'os_hash_algo', 'os_hash_value'
        ])
        self.assertEqual(checked_keys, set(image.keys()))

        expected_image = {
            'status': 'queued',
            'name': 'image-1',
            'tags': [],
            'visibility': 'shared',
            'self': '/v2/images/%s' % image_id,
            'protected': False,
            'file': '/v2/images/%s/file' % image_id,
            'min_disk': 0,
            'type': 'kernel',
            'min_ram': 0,
            'schema': '/v2/schemas/image',
        }
        for key, value in expected_image.items():
            self.assertEqual(value, image[key], key)

        # Image list should now have one entry
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_id, images[0]['id'])

        # Upload some image data to staging area
        image_data = b'QQQQQ'
        path = self._url('/v2/images/%s/stage' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Verify image is in uploading state and checksum is None
        func_utils.verify_image_hashes_and_status(self, image_id,
                                                  size=len(image_data),
                                                  status='uploading')

        # Import image to store
        path = self._url('/v2/images/%s/import' % image_id)
        headers = self._headers({
            'content-type': 'application/json',
            'X-Roles': 'admin',
        })
        data = jsonutils.dumps({'method': {
            'name': 'glance-direct'
        }})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code)

        # Verify image is in active state and checksum is set
        # NOTE(abhishekk): As import is an async call we need to allow
        # some time for the call to complete.
        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_status(self, request_path=path,
                                   request_headers=self._headers(),
                                   status='active',
                                   max_sec=15,
                                   delay_sec=0.2)
        expect_c = str(md5(image_data, usedforsecurity=False).hexdigest())
        expect_h = str(hashlib.sha512(image_data).hexdigest())
        func_utils.verify_image_hashes_and_status(self, image_id,
                                                  checksum=expect_c,
                                                  os_hash_value=expect_h,
                                                  size=len(image_data),
                                                  status='active')

        # Ensure the size is updated to reflect the data uploaded
        path = self._url('/v2/images/%s' % image_id)
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        self.assertEqual(len(image_data),
                         jsonutils.loads(response.text)['size'])

        # Ensure image is created in default backend
        self.assertIn('file1', jsonutils.loads(response.text)['stores'])

        # Deleting image should work
        path = self._url('/v2/images/%s' % image_id)
        response = requests.delete(path, headers=self._headers())
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Image list should now be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        self.stop_servers()

    def test_image_import_using_glance_direct_different_backend(self):
        self.start_servers(**self.__dict__.copy())

        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # glance-direct should be available in discovery response
        path = self._url('/v2/info/import')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        discovery_calls = jsonutils.loads(
            response.text)['import-methods']['value']
        self.assertIn("glance-direct", discovery_calls)
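        # NOTE: /v2/info/import lists the import methods enabled in the API
        # config; glance-direct is assumed to be enabled for this
        # deployment flavor.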
        # file1, file2 and file3 should be available in discovery response
        available_stores = ['file1', 'file2', 'file3']
        path = self._url('/v2/info/stores')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        discovery_calls = jsonutils.loads(response.text)['stores']

        # os_glance_staging_store should not be available in discovery
        # response
        for stores in discovery_calls:
            self.assertIn('id', stores)
            self.assertIn(stores['id'], available_stores)
            self.assertFalse(stores["id"].startswith("os_glance_"))

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel',
                                'disk_format': 'aki',
                                'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CREATED, response.status_code)

        # Check 'OpenStack-image-store-ids' header present in response
        self.assertIn('OpenStack-image-store-ids', response.headers)
        for store in available_stores:
            self.assertIn(store,
                          response.headers['OpenStack-image-store-ids'])

        # Returned image entity should have a generated id and status
        image = jsonutils.loads(response.text)
        image_id = image['id']
        checked_keys = set([
            'status', 'name', 'tags', 'created_at', 'updated_at',
            'visibility', 'self', 'protected', 'id', 'file', 'min_disk',
            'type', 'min_ram', 'schema', 'disk_format', 'container_format',
            'owner', 'checksum', 'size', 'virtual_size', 'os_hidden',
            'os_hash_algo', 'os_hash_value'
        ])
        self.assertEqual(checked_keys, set(image.keys()))

        expected_image = {
            'status': 'queued',
            'name': 'image-1',
            'tags': [],
            'visibility': 'shared',
            'self': '/v2/images/%s' % image_id,
            'protected': False,
            'file': '/v2/images/%s/file' % image_id,
            'min_disk': 0,
            'type': 'kernel',
            'min_ram': 0,
            'schema': '/v2/schemas/image',
        }
        for key, value in expected_image.items():
            self.assertEqual(value, image[key], key)

        # Image list should now have one entry
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        images = jsonutils.loads(response.text)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_id, images[0]['id'])

        # Upload some image data to staging area
        image_data = b'GLANCE IS DEAD SEXY'
        path = self._url('/v2/images/%s/stage' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data=image_data)
        self.assertEqual(http.NO_CONTENT, response.status_code)

        # Verify image is in uploading state and checksum is None
        func_utils.verify_image_hashes_and_status(self, image_id,
                                                  size=len(image_data),
                                                  status='uploading')

        # Import image to file2 store (other than default backend)
        path = self._url('/v2/images/%s/import' % image_id)
        headers = self._headers({
            'content-type': 'application/json',
            'X-Roles': 'admin',
            'X-Image-Meta-Store': 'file2'
        })
        data = jsonutils.dumps({'method': {
            'name': 'glance-direct'
        }})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code)
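        # NOTE: the X-Image-Meta-Store header above selects the target
        # backend for the import; the store assertion further down checks
        # the data landed in file2 instead of the default file1.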
path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=15, delay_sec=0.2) expect_c = str(md5(image_data, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(image_data).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(image_data), status='active') # Ensure the size is updated to reflect the data uploaded path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(len(image_data), jsonutils.loads(response.text)['size']) # Ensure image is created in different backend self.assertIn('file2', jsonutils.loads(response.text)['stores']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_import_using_web_download(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # web-download should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("web-download", discovery_calls) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 
'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and checksum is None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to store path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin', }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps({'method': { 'name': 'web-download', 'uri': image_data_uri }}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=20, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Ensure image is created in default backend path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file1', jsonutils.loads(response.text)['stores']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_import_using_web_download_different_backend(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # web-download should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("web-download", discovery_calls) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) 
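# NOTE(editor): for reference, the /v2/info/stores payload inspected
# below is expected to look roughly like this (illustrative values only;
# the exact fields depend on the store configuration):
#     {"stores": [{"id": "file1", "default": "true"},
#                 {"id": "file2"},
#                 {"id": "file3"}]}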
self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and checksum is None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to store path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin', 'X-Image-Meta-Store': 'file2' }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps({'method': { 'name': 'web-download', 'uri': image_data_uri }}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. 
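# NOTE(editor): test_utils.start_standalone_http_server (used above to
# feed web-download) returns a (thread, server, port) triple. A minimal
# sketch of such a helper, assuming only the standard library; the real
# helper's payload and behaviour may differ:
def _start_http_server_sketch(payload=b'Some image data'):
    import threading
    from http import server

    class _Handler(server.BaseHTTPRequestHandler):
        def do_GET(self):
            # Serve the same payload regardless of the requested path.
            self.send_response(200)
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload)

    httpd = server.HTTPServer(('localhost', 0), _Handler)
    thread = threading.Thread(target=httpd.serve_forever, daemon=True)
    thread.start()
    return thread, httpd, httpd.server_address[1]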
path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=20, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Ensure image is created in different backend path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file2', jsonutils.loads(response.text)['stores']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_import_multi_stores(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # web-download should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("web-download", discovery_calls) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 
'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and checksum is None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to multiple stores path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps( {'method': {'name': 'web-download', 'uri': image_data_uri}, 'stores': ['file1', 'file2']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=40, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Ensure image is created in the two stores path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file2', jsonutils.loads(response.text)['stores']) self.assertIn('file1', jsonutils.loads(response.text)['stores']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_copy_image_lifecycle(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # copy-image should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("copy-image", discovery_calls) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = 
requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and checksum is None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to the file1 store path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps( {'method': {'name': 'web-download', 'uri': image_data_uri}, 'stores': ['file1']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) import_reqid = response.headers['X-Openstack-Request-Id'] # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is an async call we need to allow # some time for the call to complete.
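# NOTE(editor): once the image goes active, the test recomputes the
# expected digests from the source bytes and compares them with what
# Glance stored. A minimal sketch of the comparison a helper like
# func_utils.verify_image_hashes_and_status presumably performs (name
# and signature illustrative; 'md5' is assumed to be the oslo.utils
# wrapper imported at module scope, with usedforsecurity=False marking
# the digest as non-cryptographic):
def _verify_hashes_sketch(image, data):
    assert image['checksum'] == md5(
        data, usedforsecurity=False).hexdigest()
    assert image['os_hash_value'] == hashlib.sha512(data).hexdigest()
    assert image['size'] == len(data)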
path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=40, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Ensure image is created in the single (file1) store path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file1', jsonutils.loads(response.text)['stores']) # Ensure image has one task associated with it path = self._url('/v2/images/%s/tasks' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tasks = jsonutils.loads(response.text)['tasks'] self.assertEqual(1, len(tasks)) for task in tasks: self.assertEqual(image_id, task['image_id']) user_id = response.request.headers.get( 'X-User-Id') self.assertEqual(user_id, task['user_id']) self.assertEqual(import_reqid, task['request_id']) # Copy newly created image to file2 and file3 stores path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) data = jsonutils.dumps( {'method': {'name': 'copy-image'}, 'stores': ['file2', 'file3']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) copy_reqid = response.headers['X-Openstack-Request-Id'] # Verify image is copied # NOTE(abhishekk): As import is an async call we need to allow # some time for the call to complete.
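# NOTE(editor): func_utils.wait_for_copying below is the multi-store
# variant of the status poller: instead of 'status' it watches the
# comma-separated 'stores' field until every requested store appears.
# A minimal sketch under the same assumptions as above (names
# illustrative, not the real func_utils API):
def _wait_for_copying_sketch(url, headers, stores, max_sec=40,
                             delay_sec=0.2):
    deadline = time.time() + max_sec
    while time.time() < deadline:
        image = jsonutils.loads(requests.get(url, headers=headers).text)
        copied = image.get('stores', '').split(',')
        if all(store in copied for store in stores):
            return image
        time.sleep(delay_sec)
    raise AssertionError('copy to %s never completed' % stores)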
path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_copying(request_path=path, request_headers=self._headers(), stores=['file2', 'file3'], max_sec=40, delay_sec=0.2, start_delay_sec=1) # Ensure image is copied to the file2 and file3 store path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file2', jsonutils.loads(response.text)['stores']) self.assertIn('file3', jsonutils.loads(response.text)['stores']) # Ensure image has two tasks associated with it path = self._url('/v2/images/%s/tasks' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tasks = jsonutils.loads(response.text)['tasks'] self.assertEqual(2, len(tasks)) expected_reqids = [copy_reqid, import_reqid] for task in tasks: self.assertEqual(image_id, task['image_id']) user_id = response.request.headers.get( 'X-User-Id') self.assertEqual(user_id, task['user_id']) self.assertEqual(expected_reqids.pop(), task['request_id']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_copy_image_revert_lifecycle(self): # Test if copying task fails in between then the rollback # should delete the data from only stores to which it is # copied and not from the existing stores. self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # copy-image should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("copy-image", discovery_calls) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = 
image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and checksum is None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to multiple stores path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps( {'method': {'name': 'web-download', 'uri': image_data_uri}, 'stores': ['file1']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=40, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Ensure image is created in the one store path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file1', jsonutils.loads(response.text)['stores']) # Copy newly created image to file2 and file3 stores path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) # NOTE(abhishekk): Deleting file3 image directory to trigger the # failure, so that we can verify that revert call does not delete # the data from existing stores # NOTE(danms): Do this before we start the import, on a later store, # which will cause that store to fail after we have already completed # the first one. 
os.rmdir(self.test_dir + "/images_3") data = jsonutils.dumps( {'method': {'name': 'copy-image'}, 'stores': ['file2', 'file3']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) def poll_callback(image): # NOTE(danms): We need to wait for the specific # arrangement we're expecting, which is that file3 has # failed, nothing else is importing, and file2 has been # removed from stores by the revert. return not (image['os_glance_importing_to_stores'] == '' and image['os_glance_failed_import'] == 'file3' and image['stores'] == 'file1') func_utils.poll_entity(self._url('/v2/images/%s' % image_id), self._headers(), poll_callback) # Here we check that the failure of 'file3' caused 'file2' to # be removed from image['stores'], and that 'file3' is reported # as failed in the appropriate status list. Since the import # started with 'file1' already populated, that should remain, # but 'file2' should be reverted/removed. path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file1', jsonutils.loads(response.text)['stores']) self.assertNotIn('file2', jsonutils.loads(response.text)['stores']) self.assertNotIn('file3', jsonutils.loads(response.text)['stores']) fail_key = 'os_glance_failed_import' pend_key = 'os_glance_importing_to_stores' self.assertEqual('file3', jsonutils.loads(response.text)[fail_key]) self.assertEqual('', jsonutils.loads(response.text)[pend_key]) # Copy newly created image to file2 and file3 stores with # all_stores_must_succeed set to False. path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) data = jsonutils.dumps( {'method': {'name': 'copy-image'}, 'stores': ['file2', 'file3'], 'all_stores_must_succeed': False}) for i in range(0, 5): response = requests.post(path, headers=headers, data=data) if response.status_code != http.CONFLICT: break # We might race with the revert of the previous task and do not # really have a good way to make sure that it's done. In order # to tolerate the 409 that import locking can raise in that # window, gracefully wait and retry a few times before failing. time.sleep(1) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is copied # NOTE(abhishekk): As import is an async call we need to allow # some time for the call to complete.
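# NOTE(editor): for reference, the two reserved properties polled by
# poll_callback above track per-store import progress. Mid-import they
# might read, for example (illustrative values only):
#     os_glance_importing_to_stores = 'file2,file3'  # still pending
#     os_glance_failed_import = ''                   # nothing failed yet
# After the injected file3 failure and the revert of file2, the test
# expects:
#     os_glance_importing_to_stores = ''
#     os_glance_failed_import = 'file3'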
path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_copying(request_path=path, request_headers=self._headers(), stores=['file2'], max_sec=10, delay_sec=0.2, start_delay_sec=1, failure_scenario=True) # Ensure data is not deleted from existing stores as well as # from the stores where it is copied successfully path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file1', jsonutils.loads(response.text)['stores']) self.assertIn('file2', jsonutils.loads(response.text)['stores']) self.assertNotIn('file3', jsonutils.loads(response.text)['stores']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_import_multi_stores_specifying_all_stores(self): self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # web-download should be available in discovery response path = self._url('/v2/info/import') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['import-methods']['value'] self.assertIn("web-download", discovery_calls) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 
'min_disk': 0, 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Verify image is in queued state and checksum is None func_utils.verify_image_hashes_and_status(self, image_id, status='queued') # Import image to multiple stores path = self._url('/v2/images/%s/import' % image_id) headers = self._headers({ 'content-type': 'application/json', 'X-Roles': 'admin' }) # Start http server locally thread, httpd, port = test_utils.start_standalone_http_server() image_data_uri = 'http://localhost:%s/' % port data = jsonutils.dumps( {'method': {'name': 'web-download', 'uri': image_data_uri}, 'all_stores': True}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.ACCEPTED, response.status_code) # Verify image is in active state and checksum is set # NOTE(abhishekk): As import is a async call we need to provide # some timelap to complete the call. path = self._url('/v2/images/%s' % image_id) func_utils.wait_for_status(self, request_path=path, request_headers=self._headers(), status='active', max_sec=40, delay_sec=0.2, start_delay_sec=1) with requests.get(image_data_uri) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(r.content), status='active') # kill the local http server httpd.shutdown() httpd.server_close() # Ensure image is created in the two stores path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file3', jsonutils.loads(response.text)['stores']) self.assertIn('file2', jsonutils.loads(response.text)['stores']) self.assertIn('file1', jsonutils.loads(response.text)['stores']) # Deleting image should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_lifecycle(self): # Image list should be empty self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # file1 and file2 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an 
image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'abc': 'xyz', 'protected': True}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'foo', 'abc', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': True, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'foo': 'bar', 'abc': 'xyz', 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Try to download data before its uploaded path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) # Upload some image data image_data = b'OpenStack Rules, Other Clouds Drool' path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data=image_data) self.assertEqual(http.NO_CONTENT, response.status_code) expect_c = str(md5(image_data, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(image_data).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(image_data), status='active') # Ensure image is created in default backend path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file1', jsonutils.loads(response.text)['stores']) # Try to download the data that was just uploaded path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(expect_c, response.headers['Content-MD5']) self.assertEqual(image_data.decode('utf-8'), response.text) # Ensure the size is updated to reflect the data uploaded path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(len(image_data), jsonutils.loads(response.text)['size']) # Unprotect image for deletion path = self._url('/v2/images/%s' % image_id) media_type = 
'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [{'op': 'replace', 'path': '/protected', 'value': False}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Deletion should now work; delete the image path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # The image should no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # And neither should its data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() def test_image_lifecycle_different_backend(self): # Image list should be empty self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # file1, file2 and file3 should be available in discovery response available_stores = ['file1', 'file2', 'file3'] path = self._url('/v2/info/stores') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) discovery_calls = jsonutils.loads( response.text)['stores'] # os_glance_staging_store should not be available in discovery response for stores in discovery_calls: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertFalse(stores["id"].startswith("os_glance_")) # Create an image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'abc': 'xyz', 'protected': True}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Check 'OpenStack-image-store-ids' header present in response self.assertIn('OpenStack-image-store-ids', response.headers) for store in available_stores: self.assertIn(store, response.headers['OpenStack-image-store-ids']) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ 'status', 'name', 'tags', 'created_at', 'updated_at', 'visibility', 'self', 'protected', 'id', 'file', 'min_disk', 'foo', 'abc', 'type', 'min_ram', 'schema', 'disk_format', 'container_format', 'owner', 'checksum', 'size', 'virtual_size', 'os_hidden', 'os_hash_algo', 'os_hash_value' ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'shared', 'self': '/v2/images/%s' % image_id, 'protected': True, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'foo': 'bar', 'abc': 'xyz', 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items():
self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Try to download data before its uploaded path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) # Upload some image data image_data = b'just a passing glance' path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({ 'Content-Type': 'application/octet-stream', 'X-Image-Meta-Store': 'file2' }) response = requests.put(path, headers=headers, data=image_data) self.assertEqual(http.NO_CONTENT, response.status_code) expect_c = str(md5(image_data, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(image_data).hexdigest()) func_utils.verify_image_hashes_and_status(self, image_id, checksum=expect_c, os_hash_value=expect_h, size=len(image_data), status='active') # Ensure image is created in different backend path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertIn('file2', jsonutils.loads(response.text)['stores']) # Try to download the data that was just uploaded path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(expect_c, response.headers['Content-MD5']) self.assertEqual(image_data.decode('utf-8'), response.text) # Ensure the size is updated to reflect the data uploaded path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual(len(image_data), jsonutils.loads(response.text)['size']) # Unprotect image for deletion path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [{'op': 'replace', 'path': '/protected', 'value': False}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Deletion should work. 
path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # The image should no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # And neither should its data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) self.stop_servers() class TestMultiStoreImageMembers(functional.MultipleBackendFunctionalTest): def setUp(self): super(TestMultiStoreImageMembers, self).setUp() self.cleanup() self.include_scrubber = False self.api_server_multiple_backend.deployment_flavor = 'noauth' for i in range(3): ret = test_utils.start_http_server("foo_image_id%d" % i, "foo_image%d" % i) setattr(self, 'http_server%d' % i, ret[1]) setattr(self, 'http_port%d' % i, ret[2]) def tearDown(self): for i in range(3): httpd = getattr(self, 'http_server%d' % i, None) if httpd: httpd.shutdown() httpd.server_close() super(TestMultiStoreImageMembers, self).tearDown() def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def test_image_member_lifecycle_for_multiple_stores(self): self.start_servers(**self.__dict__.copy()) try: def get_header(tenant, tenant_id=None, role=''): return self._headers(custom_headers=get_auth_header( tenant, tenant_id, role)) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) owners = ['tenant1', 'tenant2', 'admin'] visibilities = ['community', 'private', 'public', 'shared'] image_fixture = [] for owner in owners: for visibility in visibilities: path = self._url('/v2/images') role = 'member' if visibility == 'public': role = 'admin' headers = self._headers(custom_headers={ 'content-type': 'application/json', 'X-Auth-Token': 'createuser:%s:admin' % owner, 'X-Roles': role, }) data = jsonutils.dumps({ 'name': '%s-%s' % (owner, visibility), 'visibility': visibility, }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image_fixture.append(jsonutils.loads(response.text)) # Image list should contain 12 images for tenant1 path = self._url('/v2/images') response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(12, len(images)) # Image list should contain 3 images for TENANT3 path = self._url('/v2/images') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(3, len(images)) # Add Image member for tenant1-shared image
path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) body = jsonutils.dumps({'member': TENANT3}) response = requests.post(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.OK, response.status_code) image_member = jsonutils.loads(response.text) self.assertEqual(image_fixture[3]['id'], image_member['image_id']) self.assertEqual(TENANT3, image_member['member_id']) self.assertIn('created_at', image_member) self.assertIn('updated_at', image_member) self.assertEqual('pending', image_member['status']) # Image list should contain 3 images for TENANT3 path = self._url('/v2/images') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(3, len(images)) # Image list should contain 0 shared images for TENANT3 # because default is accepted path = self._url('/v2/images?visibility=shared') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image list should contain 4 images for TENANT3 with status # pending path = self._url('/v2/images?member_status=pending') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Image list should contain 4 images for TENANT3 with status all path = self._url('/v2/images?member_status=all') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Image list should contain 1 image for TENANT3 with status pending # and visibility shared path = self._url( '/v2/images?member_status=pending&visibility=shared') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(images[0]['name'], 'tenant1-shared') # Image list should contain 0 image for TENANT3 with status # rejected and visibility shared path = self._url( '/v2/images?member_status=rejected&visibility=shared') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image list should contain 0 image for TENANT3 with status # accepted and visibility shared path = self._url( '/v2/images?member_status=accepted&visibility=shared') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image list should contain 0 image for TENANT3 with status # accepted and visibility private path = self._url('/v2/images?visibility=private') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image tenant2-shared's image members list should contain # no members path = self._url('/v2/images/%s/members' % image_fixture[7]['id']) response = requests.get(path, 
headers=get_header('tenant2')) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['members'])) # Tenant 1, who is the owner, cannot change the status of an image # member path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) body = jsonutils.dumps({'status': 'accepted'}) response = requests.put(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.FORBIDDEN, response.status_code) # Tenant 1, who is the owner, can get the status of its own image # member path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual('pending', body['status']) self.assertEqual(image_fixture[3]['id'], body['image_id']) self.assertEqual(TENANT3, body['member_id']) # Tenant 3, who is the member, can get its own member status path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual('pending', body['status']) self.assertEqual(image_fixture[3]['id'], body['image_id']) self.assertEqual(TENANT3, body['member_id']) # Tenant 2, who is not the owner, cannot get the status of the image # member path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) response = requests.get(path, headers=get_header( 'tenant2', tenant_id=TENANT2)) self.assertEqual(http.NOT_FOUND, response.status_code) # Tenant 3 can change the status of the image member path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) body = jsonutils.dumps({'status': 'accepted'}) response = requests.put(path, headers=get_header( TENANT3, tenant_id=TENANT3), data=body) self.assertEqual(http.OK, response.status_code) image_member = jsonutils.loads(response.text) self.assertEqual(image_fixture[3]['id'], image_member['image_id']) self.assertEqual(TENANT3, image_member['member_id']) self.assertEqual('accepted', image_member['status']) # Image list should contain 4 images for TENANT3 because status is # accepted path = self._url('/v2/images') response = requests.get(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.OK, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Tenant 3 cannot set an invalid member status path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) body = jsonutils.dumps({'status': 'invalid-status'}) response = requests.put(path, headers=get_header( TENANT3, tenant_id=TENANT3), data=body) self.assertEqual(http.BAD_REQUEST, response.status_code) # Owner cannot change the status of the image member path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) body = jsonutils.dumps({'status': 'accepted'}) response = requests.put(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.FORBIDDEN, response.status_code) # Add Image member for tenant2-shared image path = self._url('/v2/images/%s/members' % image_fixture[7]['id']) body = jsonutils.dumps({'member': TENANT4}) response = requests.post(path, headers=get_header('tenant2'), data=body) self.assertEqual(http.OK, response.status_code) image_member = jsonutils.loads(response.text) self.assertEqual(image_fixture[7]['id'],
image_member['image_id']) self.assertEqual(TENANT4, image_member['member_id']) self.assertIn('created_at', image_member) self.assertIn('updated_at', image_member) # Add Image member to public image path = self._url('/v2/images/%s/members' % image_fixture[2]['id']) body = jsonutils.dumps({'member': TENANT2}) response = requests.post(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.FORBIDDEN, response.status_code) # Add Image member to private image path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) body = jsonutils.dumps({'member': TENANT2}) response = requests.post(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.FORBIDDEN, response.status_code) # Add Image member to community image path = self._url('/v2/images/%s/members' % image_fixture[0]['id']) body = jsonutils.dumps({'member': TENANT2}) response = requests.post(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.FORBIDDEN, response.status_code) # Image tenant1-shared's members list should contain 1 member path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(1, len(body['members'])) # Admin can see any members path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) response = requests.get(path, headers=get_header('tenant1', tenant_id=TENANT1, role='admin')) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(1, len(body['members'])) # Image members forbidden for public image path = self._url('/v2/images/%s/members' % image_fixture[2]['id']) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertIn("Only shared images have members", response.text) self.assertEqual(http.FORBIDDEN, response.status_code) # Image members forbidden for community image path = self._url('/v2/images/%s/members' % image_fixture[0]['id']) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertIn("Only shared images have members", response.text) self.assertEqual(http.FORBIDDEN, response.status_code) # Image members forbidden for private image path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertIn("Only shared images have members", response.text) self.assertEqual(http.FORBIDDEN, response.status_code) # Image Member Cannot delete Image membership path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) response = requests.delete(path, headers=get_header( TENANT3, tenant_id=TENANT3)) self.assertEqual(http.FORBIDDEN, response.status_code) # Delete Image member path = self._url('/v2/images/%s/members/%s' % ( image_fixture[3]['id'], TENANT3)) response = requests.delete(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.NO_CONTENT, response.status_code) # Now the image has no members path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.OK, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['members'])) # Adding 11 image members should fail since configured limit is 10 path = 
self._url('/v2/images/%s/members' % image_fixture[3]['id']) for i in range(10): body = jsonutils.dumps({'member': str(uuid.uuid4())}) response = requests.post(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.OK, response.status_code) body = jsonutils.dumps({'member': str(uuid.uuid4())}) response = requests.post(path, headers=get_header( 'tenant1', tenant_id=TENANT1), data=body) self.assertEqual(http.REQUEST_ENTITY_TOO_LARGE, response.status_code) # Get Image member should return not found for public image path = self._url('/v2/images/%s/members/%s' % ( image_fixture[2]['id'], TENANT3)) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.NOT_FOUND, response.status_code) # Get Image member should return not found for community image path = self._url('/v2/images/%s/members/%s' % ( image_fixture[0]['id'], TENANT3)) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.NOT_FOUND, response.status_code) # Get Image member should return not found for private image path = self._url('/v2/images/%s/members/%s' % ( image_fixture[1]['id'], TENANT3)) response = requests.get(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.NOT_FOUND, response.status_code) # Delete Image member should return forbidden for public image path = self._url('/v2/images/%s/members/%s' % ( image_fixture[2]['id'], TENANT3)) response = requests.delete(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.FORBIDDEN, response.status_code) # Delete Image member should return forbidden for community image path = self._url('/v2/images/%s/members/%s' % ( image_fixture[0]['id'], TENANT3)) response = requests.delete(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.FORBIDDEN, response.status_code) # Delete Image member should return forbidden for private image path = self._url('/v2/images/%s/members/%s' % ( image_fixture[1]['id'], TENANT3)) response = requests.delete(path, headers=get_header( 'tenant1', tenant_id=TENANT1)) self.assertEqual(http.FORBIDDEN, response.status_code) except requests.exceptions.ConnectionError as e: # NOTE(abhishekk): This test fails intermittently for py37 # environment refer, # https://bugs.launchpad.net/glance/+bug/1873735 self.skipTest("Remote connection closed abruptly: %s" % e.args[0]) self.stop_servers() class TestCopyImagePermissions(functional.MultipleBackendFunctionalTest): def setUp(self): super(TestCopyImagePermissions, self).setUp() self.cleanup() self.include_scrubber = False self.api_server_multiple_backend.deployment_flavor = 'noauth' def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def _create_and_import_image_data(self): # Create a public image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'visibility': 'public', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s/import' % image_id) headers = 
self._headers({
            'content-type': 'application/json',
            'X-Roles': 'admin'
        })

        # Start http server locally
        thread, httpd, port = test_utils.start_standalone_http_server()

        image_data_uri = 'http://localhost:%s/' % port
        data = jsonutils.dumps(
            {'method': {'name': 'web-download', 'uri': image_data_uri},
             'stores': ['file1']})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.ACCEPTED, response.status_code)

        # Verify image is in active state and checksum is set
        # NOTE(abhishekk): As import is an async call we need to allow
        # some time for the call to complete.
        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_status(self, request_path=path,
                                   request_headers=self._headers(),
                                   status='active',
                                   max_sec=40,
                                   delay_sec=0.2,
                                   start_delay_sec=1)
        with requests.get(image_data_uri) as r:
            expect_c = str(md5(r.content,
                               usedforsecurity=False).hexdigest())
            expect_h = str(hashlib.sha512(r.content).hexdigest())
        func_utils.verify_image_hashes_and_status(self,
                                                  image_id,
                                                  checksum=expect_c,
                                                  os_hash_value=expect_h,
                                                  size=len(r.content),
                                                  status='active')

        # kill the local http server
        httpd.shutdown()
        httpd.server_close()

        return image_id

    def _test_copy_public_image_as_non_admin(self):
        self.start_servers(**self.__dict__.copy())

        # Create a publicly-visible image as TENANT1
        image_id = self._create_and_import_image_data()

        # Ensure the image was created in just the one store
        path = self._url('/v2/images/%s' % image_id)
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        self.assertEqual('file1', jsonutils.loads(response.text)['stores'])

        # Copy the newly created image to the file2 store as TENANT2
        path = self._url('/v2/images/%s/import' % image_id)
        headers = self._headers({
            'content-type': 'application/json',
        })
        headers = get_auth_header(TENANT2, TENANT2,
                                  role='reader,member', headers=headers)
        data = jsonutils.dumps(
            {'method': {'name': 'copy-image'}, 'stores': ['file2']})
        response = requests.post(path, headers=headers, data=data)
        return image_id, response

    def test_copy_public_image_as_non_admin(self):
        rules = {
            "context_is_admin": "role:admin",
            "default": "",
            "add_image": "",
            "get_image": "",
            "modify_image": "",
            "upload_image": "",
            "get_image_location": "",
            "delete_image": "",
            "restricted": "",
            "download_image": "",
            "add_member": "",
            "publicize_image": "",
            "copy_image": "role:admin",
        }
        self.set_policy_rules(rules)

        image_id, response = self._test_copy_public_image_as_non_admin()
        # Expect failure to copy another user's image
        self.assertEqual(http.FORBIDDEN, response.status_code)

    def test_copy_public_image_as_non_admin_permitted(self):
        rules = {
            "context_is_admin": "role:admin",
            "default": "",
            "add_image": "",
            "get_image": "",
            "modify_image": "",
            "upload_image": "",
            "get_image_location": "",
            "delete_image": "",
            "restricted": "",
            "download_image": "",
            "add_member": "",
            "publicize_image": "",
            "copy_image": "'public':%(visibility)s",
        }
        self.set_policy_rules(rules)

        image_id, response = self._test_copy_public_image_as_non_admin()
        # Expect success because the image is public
        self.assertEqual(http.ACCEPTED, response.status_code)

        # Verify the image was copied
        # NOTE(abhishekk): As import is an async call we need to allow
        # some time for the call to complete.
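        # (For context: func_utils.wait_for_copying below is a test helper
        # that polls the image until the requested stores appear.
        # Conceptually it behaves like the following loop -- a sketch of
        # the idea, not the helper's exact implementation:
        #
        #     deadline = time.time() + max_sec
        #     while time.time() < deadline:
        #         image = requests.get(path, headers=headers).json()
        #         if 'file2' in image.get('stores', ''):
        #             break
        #         time.sleep(delay_sec)
        # )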
        path = self._url('/v2/images/%s' % image_id)
        func_utils.wait_for_copying(request_path=path,
                                    request_headers=self._headers(),
                                    stores=['file2'],
                                    max_sec=40,
                                    delay_sec=0.2,
                                    start_delay_sec=1)

        # Ensure the image was copied to the file2 store
        path = self._url('/v2/images/%s' % image_id)
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        self.assertIn('file2', jsonutils.loads(response.text)['stores'])


class TestImportProxy(functional.SynchronousAPIBase):
    """Test the image import proxy-to-stage-worker behavior.

    This is done as a SynchronousAPIBase test with one mock for a
    couple of reasons:

    1. The main functional tests can't handle a call with a token
       inside because of their paste config. Even if they did, they
       would not be able to validate it.
    2. The main functional tests don't support multiple API workers
       with separate config, and making them work that way is
       non-trivial.

    Functional tests are fairly synthetic already, and fixing or
    hacking around the above would only push them further in that
    direction. Using the synchronous API method is vastly easier,
    easier to verify, and tests the integration across the API calls,
    which is what is important.
    """

    def setUp(self):
        super(TestImportProxy, self).setUp()
        # Emulate a keystoneauth1 client for service-to-service
        # communication
        self.ksa_client = self.useFixture(
            fixtures.MockPatch('glance.context.get_ksa_client')).mock

    def test_import_proxy(self):
        resp = requests.Response()
        resp.status_code = 202
        resp.headers['x-openstack-request-id'] = 'req-remote'
        self.ksa_client.return_value.post.return_value = resp

        # Stage it on worker1
        self.config(worker_self_reference_url='http://worker1')
        self.start_server(set_worker_url=False)
        image_id = self._create_and_stage()

        # Make sure we can't see the stage host key
        image = self.api_get('/v2/images/%s' % image_id).json
        self.assertIn('container_format', image)
        self.assertNotIn('os_glance_stage_host', image)

        # Import call goes to worker2
        self.config(worker_self_reference_url='http://worker2')
        self.start_server(set_worker_url=False)
        r = self._import_direct(image_id, ['store1'])

        # Assert that it was proxied back to worker1
        self.assertEqual(202, r.status_code)
        self.assertEqual('req-remote', r.headers['x-openstack-request-id'])
        self.ksa_client.return_value.post.assert_called_once_with(
            'http://worker1/v2/images/%s/import' % image_id,
            timeout=60,
            json={'method': {'name': 'glance-direct'},
                  'stores': ['store1'],
                  'all_stores': False})

    def test_import_proxy_fail_on_remote(self):
        resp = requests.Response()
        resp.url = '/v2'
        resp.status_code = 409
        resp.reason = 'Something Failed (tm)'
        self.ksa_client.return_value.post.return_value = resp
        self.ksa_client.return_value.delete.return_value = resp

        # Stage it on worker1
        self.config(worker_self_reference_url='http://worker1')
        self.start_server(set_worker_url=False)
        image_id = self._create_and_stage()

        # Import call goes to worker2
        self.config(worker_self_reference_url='http://worker2')
        self.start_server(set_worker_url=False)
        r = self._import_direct(image_id, ['store1'])

        # Make sure we see the relevant details from worker1
        self.assertEqual(409, r.status_code)
        self.assertEqual('409 Something Failed (tm)', r.status)

        # For a 40x, we should get the same on delete
        r = self.api_delete('/v2/images/%s' % image_id)
        self.assertEqual(409, r.status_code)
        self.assertEqual('409 Something Failed (tm)', r.status)

    def _test_import_proxy_fail_requests(self, error, status):
        self.ksa_client.return_value.post.side_effect = error
        self.ksa_client.return_value.delete.side_effect = error
        # Stage it on worker1
        self.config(worker_self_reference_url='http://worker1')
        self.start_server(set_worker_url=False)
        image_id = self._create_and_stage()

        # Import call goes to worker2
        self.config(worker_self_reference_url='http://worker2')
        self.start_server(set_worker_url=False)
        r = self._import_direct(image_id, ['store1'])
        self.assertEqual(status, r.status)
        self.assertIn(b'Stage host is unavailable', r.body)

        # Make sure we can still delete it
        r = self.api_delete('/v2/images/%s' % image_id)
        self.assertEqual(204, r.status_code)
        r = self.api_get('/v2/images/%s' % image_id)
        self.assertEqual(404, r.status_code)

    def test_import_proxy_connection_refused(self):
        self._test_import_proxy_fail_requests(
            requests.exceptions.ConnectionError(),
            '504 Gateway Timeout')

    def test_import_proxy_connection_timeout(self):
        self._test_import_proxy_fail_requests(
            requests.exceptions.ConnectTimeout(),
            '504 Gateway Timeout')

    def test_import_proxy_connection_unknown_error(self):
        self._test_import_proxy_fail_requests(
            requests.exceptions.RequestException(),
            '502 Bad Gateway')


def get_enforcer_class(limits):
    class FakeEnforcer:
        def __init__(self, callback):
            self._callback = callback

        def enforce(self, project_id, values):
            for name, delta in values.items():
                current = self._callback(project_id, values.keys())
                if current.get(name) + delta > limits.get(name, 0):
                    raise ol_exc.ProjectOverLimit(
                        project_id=project_id,
                        over_limit_info_list=[ol_exc.OverLimitInfo(
                            name, limits.get(name), current.get(name),
                            delta)])

        def calculate_usage(self, project_id, names):
            return {
                name: limit.ProjectUsage(
                    limits.get(name, 0),
                    self._callback(project_id, [name])[name])
                for name in names}

    return FakeEnforcer


class TestKeystoneQuotas(functional.SynchronousAPIBase):
    def setUp(self):
        super(TestKeystoneQuotas, self).setUp()
        self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit')
        self.config(use_keystone_limits=True)
        self.config(filesystem_store_datadir='/tmp/foo',
                    group='os_glance_tasks_store')

        self.enforcer_mock = self.useFixture(
            fixtures.MockPatchObject(ks_quota, 'limit')).mock

    def set_limit(self, limits):
        self.enforcer_mock.Enforcer = get_enforcer_class(limits)

    def test_upload(self):
        # Set a quota of 5MiB
        self.set_limit({'image_size_total': 5,
                        'image_count_total': 10,
                        'image_count_uploading': 10})
        self.start_server()

        # First upload of 3MiB is good
        image_id = self._create_and_upload(
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Second upload of 3MiB is allowed to complete, but leaves us
        # over-quota
        self._create_and_upload(
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Third upload of any size fails because we are now over quota
        self._create_and_upload(expected_code=413)

        # Delete one image, which should put us under quota
        self.api_delete('/v2/images/%s' % image_id)

        # Upload should now succeed
        self._create_and_upload()

    def test_import(self):
        # Set a quota of 5MiB
        self.set_limit({'image_size_total': 5,
                        'image_count_total': 10,
                        'image_count_uploading': 10})
        self.start_server()

        # First upload of 3MiB is good
        image_id = self._create_and_upload(
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Second upload of 3MiB is allowed to complete, but leaves us
        # over-quota
        self._create_and_upload(data_iter=test_utils.FakeData(3 * units.Mi))

        # Attempting an import of any size fails because we are now
        # over quota
        self._create_and_import(stores=['store1'], expected_code=413)

        # Delete one image, which should put us under quota
        self.api_delete('/v2/images/%s' % image_id)

        # Import should now succeed
        self._create_and_import(stores=['store1'])
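    # A minimal sketch of how the FakeEnforcer above trips these checks,
    # using illustrative numbers that mirror the tests in this class:
    # with limits={'image_size_total': 5} and the usage callback
    # reporting {'image_size_total': 3}, a call like
    #
    #     enforcer.enforce(project_id, {'image_size_total': 3})
    #
    # computes current (3) + delta (3) > limit (5) and raises
    # ProjectOverLimit, which the API surfaces as HTTP 413.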
    def test_import_would_go_over(self):
        # Set a quota limit of 5MiB
        self.set_limit({'image_size_total': 5,
                        'image_count_total': 10,
                        'image_count_uploading': 10})
        self.start_server()

        # First upload of 3MiB is good
        image_id = self._create_and_upload(
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Stage a 3MiB image for later import
        import_id = self._create_and_stage(
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Import should fail the task because it would put us over our
        # 5MiB quota
        self._import_direct(import_id, ['store1'])
        image = self._wait_for_import(import_id)
        task = self._get_latest_task(import_id)
        self.assertEqual('failure', task['status'])
        self.assertIn(('image_size_total is over limit of 5 due to '
                       'current usage 3 and delta 3'),
                      task['message'])

        # Delete the first image to make space
        resp = self.api_delete('/v2/images/%s' % image_id)
        self.assertEqual(204, resp.status_code)

        # Stage a 3MiB image for later import (this must be done
        # because a failed import cannot go back to 'uploading' status)
        import_id = self._create_and_stage(
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Make sure the import is possible now
        resp = self._import_direct(import_id, ['store1'])
        self.assertEqual(202, resp.status_code)
        image = self._wait_for_import(import_id)
        self.assertEqual('active', image['status'])
        task = self._get_latest_task(import_id)
        self.assertEqual('success', task['status'])

    def test_copy(self):
        # Set a size quota of 5MiB, with more staging quota than we need.
        self.set_limit({'image_size_total': 5,
                        'image_count_total': 10,
                        'image_stage_total': 15,
                        'image_count_uploading': 10})
        self.start_server()

        # First import of 3MiB is good
        image_id = self._create_and_import(
            stores=['store1'],
            data_iter=test_utils.FakeData(3 * units.Mi))

        # Second copy is allowed to complete, but leaves us at 6MiB of
        # total usage, over quota
        req = self._import_copy(image_id, ['store2'])
        self.assertEqual(202, req.status_code)
        self._wait_for_import(image_id)
        self.assertEqual('success',
                         self._get_latest_task(image_id)['status'])

        # Third copy should fail because we're over total size quota.
        req = self._import_copy(image_id, ['store3'])
        self.assertEqual(413, req.status_code)

        # Set our size quota to have enough space, but restrict our
        # staging quota to below the required size to stage the image
        # before copy. This request should succeed, but the copy task
        # should fail the staging quota check.
        self.set_limit({'image_size_total': 15,
                        'image_count_total': 10,
                        'image_stage_total': 5,
                        'image_count_uploading': 10})
        req = self._import_copy(image_id, ['store3'])
        self.assertEqual(202, req.status_code)
        self._wait_for_import(image_id)
        self.assertEqual('failure',
                         self._get_latest_task(image_id)['status'])

        # If we increase our stage quota, we should now be able to copy.
self.set_limit({'image_size_total': 15, 'image_count_total': 10, 'image_stage_total': 10, 'image_count_uploading': 10}) req = self._import_copy(image_id, ['store3']) self.assertEqual(202, req.status_code) self._wait_for_import(image_id) self.assertEqual('success', self._get_latest_task(image_id)['status']) def test_stage(self): # Set a quota of 5MiB self.set_limit({'image_size_total': 15, 'image_stage_total': 5, 'image_count_total': 10, 'image_count_uploading': 10}) self.start_server() # Stage 6MiB, which is allowed to complete, but leaves us over # quota image_id = self._create_and_stage( data_iter=test_utils.FakeData(6 * units.Mi)) # Second stage fails because we are out of quota self._create_and_stage(expected_code=413) # Make sure that a web-download fails to actually run. image_id2 = self._create().json['id'] req = self._import_web_download(image_id2, ['store1'], 'http://example.com/foo.img') self.assertEqual(202, req.status_code) self._wait_for_import(image_id2) task = self._get_latest_task(image_id2) self.assertEqual('failure', task['status']) self.assertIn('image_stage_total is over limit', task['message']) # Finish importing one of the images, which should put us under quota # for staging req = self._import_direct(image_id, ['store1']) self.assertEqual(202, req.status_code) self._wait_for_import(image_id) # Stage should now succeed because we have freed up quota self._create_and_stage( data_iter=test_utils.FakeData(6 * units.Mi)) def test_create(self): # Set a quota of 2 images self.set_limit({'image_size_total': 15, 'image_count_total': 2, 'image_count_uploading': 10}) self.start_server() # Create one image image_id = self._create().json['id'] # Create a second. This leaves us *at* quota self._create() # Attempt to create a third is rejected as OverLimit resp = self._create() self.assertEqual(413, resp.status_code) # Delete one image, which should put us under quota self.api_delete('/v2/images/%s' % image_id) # Now we can create that third image self._create() def test_uploading_methods(self): self.set_limit({'image_size_total': 100, 'image_stage_total': 100, 'image_count_total': 100, 'image_count_uploading': 1}) self.start_server() # Create and stage one image. We are now at quota for count_uploading. image_id = self._create_and_stage() # Make sure we can not stage any more images. self._create_and_stage(expected_code=413) # Make sure we can not upload any more images. self._create_and_upload(expected_code=413) # Finish importing one of the images, which should put us under quota # for count_uploading. resp = self._import_direct(image_id, ['store1']) self.assertEqual(202, resp.status_code) self.assertEqual('active', self._wait_for_import(image_id)['status']) # Make sure we can upload now. self._create_and_upload() # Stage another, which should put us at quota for count_uploading. image_id2 = self._create_and_stage() # Start a copy. The request should succeed (because async) but # the task should ultimately fail because we are over quota. # NOTE(danms): It would be nice to try to do another copy or # upload while this is running, but since the task is fully # async and the copy happens quickly, we can't really time it # to avoid an unstable test (without some mocking). resp = self._import_copy(image_id, ['store2']) self.assertEqual(202, resp.status_code) self._wait_for_import(image_id) task = self._get_latest_task(image_id) self.assertEqual('failure', task['status']) self.assertIn('Resource image_count_uploading is over limit', task['message']) # Finish the staged import. 
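        # (image_count_uploading counts images that are in an uploading
        # or staging state; completing the import below moves the image
        # to 'active', which is what frees that quota for the uploads
        # and copies that follow.)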
self._import_direct(image_id2, ['store1']) self.assertEqual(202, resp.status_code) self._wait_for_import(image_id2) # Make sure we can upload again after the import finishes. self._create_and_upload() # Re-try the copy that should now succeed and wait for it to # finish. resp = self._import_copy(image_id, ['store2']) self.assertEqual(202, resp.status_code) self._wait_for_import(image_id) task = self._get_latest_task(image_id) self.assertEqual('success', task['status']) # Make sure we can still upload. self._create_and_upload() # Make sure we can still import. self._create_and_import(stores=['store1']) class TestStoreWeight(functional.SynchronousAPIBase): def setUp(self): super(TestStoreWeight, self).setUp() def test_store_weight_combinations(self): self.start_server() # Import image in all available stores image_id = self._create_and_import(stores=['store1', 'store2', 'store3']) # make sure as weight is default, we will get locations based # on insertion order image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual("store1,store2,store3", image['stores']) # give highest weight to store2 then store3 and then store1 self.config(weight=200, group='store2') self.config(weight=100, group='store3') self.config(weight=50, group='store1') self.start_server() # make sure as per store weight locations will be sorted # as store2,store3,store1 image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual("store2,store3,store1", image['stores']) # give highest weight to store3 then store1 and then store2 self.config(weight=20, group='store2') self.config(weight=100, group='store3') self.config(weight=50, group='store1') self.start_server() # make sure as per store weight locations will be sorted # as store3,store1,store2 image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual("store3,store1,store2", image['stores']) class TestMultipleBackendsLocationApi(functional.SynchronousAPIBase): def setUp(self): super(TestMultipleBackendsLocationApi, self).setUp() self.start_server() for i in range(3): ret = test_utils.start_http_server("foo_image_id%d" % i, "foo_image%d" % i) setattr(self, 'http_server%d' % i, ret[1]) setattr(self, 'http_port%d' % i, ret[2]) def setup_stores(self): pass def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'reader,member', } base_headers.update(custom_headers or {}) return base_headers def _setup_multiple_stores(self): self.ksa_client = self.useFixture( fixtures.MockPatch('glance.context.get_ksa_client')).mock self.config(enabled_backends={'store1': 'http', 'store2': 'http'}) glance_store.register_store_opts(CONF, reserved_stores=wsgi.RESERVED_STORES) self.config(default_backend='store1', group='glance_store') self.config(filesystem_store_datadir=self._store_dir('staging'), group='os_glance_staging_store') self.config(filesystem_store_datadir='/tmp/foo', group='os_glance_tasks_store') glance_store.create_multi_stores(CONF, reserved_stores=wsgi.RESERVED_STORES) glance_store.verify_store() def test_add_location_with_do_secure_hash_false(self): self.config(do_secure_hash=False) self._setup_multiple_stores() # Add Location with valid URL and do_secure_hash = False # with validation_data # Create an image 1 path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} 
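        # (For reference, the location calls later in this test POST a
        # body of the following shape, where the URL points at one of
        # the local HTTP servers started in setUp:
        #
        #     {"url": "http://127.0.0.1:<port>/store1/foo_image",
        #      "validation_data": {"os_hash_algo": "sha512",
        #                          "os_hash_value": "<sha512 hexdigest>"}}
        # )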
response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) self.assertIsNone(image['size']) self.assertIsNone(image['virtual_size']) url = 'http://127.0.0.1:%s/store1/foo_image' % self.http_port0 with requests.get(url) as r: expect_h = str(hashlib.sha512(r.content).hexdigest()) validation_data = { 'os_hash_algo': 'sha512', 'os_hash_value': expect_h} path = '/v2/images/%s/locations' % image_id headers = self._headers({'X-Tenant-Id': TENANT1}) data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.ACCEPTED, response.status_code, response.text) path = '/v2/images/%s' % image_id func_utils.wait_for_status(self, request_path=path, request_headers=headers, status='active', max_sec=5, delay_sec=0.2, start_delay_sec=1, multistore=True) # Add Location with valid URL and do_secure_hash = False # without validation_data # Create an image 2 path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) self.assertIsNone(image['size']) self.assertIsNone(image['virtual_size']) url = 'http://127.0.0.1:%s/store1/foo_image' % self.http_port0 path = '/v2/images/%s/locations' % image_id headers = self._headers({'X-Tenant-Id': TENANT1}) data = {'url': url} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.ACCEPTED, response.status_code, response.text) path = '/v2/images/%s' % image_id func_utils.wait_for_status(self, request_path=path, request_headers=headers, status='active', max_sec=5, delay_sec=0.2, start_delay_sec=1, multistore=True) def test_add_location_with_do_secure_hash_true_negative(self): self._setup_multiple_stores() # Create an image path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) # Add Location with non image owner path = '/v2/images/%s/locations' % image_id headers = self._headers({'X-Tenant-Id': TENANT2}) url = 'http://127.0.0.1:%s/store1/foo_image' % self.http_port0 data = {'url': url} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.NOT_FOUND, response.status_code, response.text) # Add location with invalid validation_data # Invalid os_hash_value validation_data = { 'os_hash_algo': "sha512", 'os_hash_value': "dbc9e0f80d131e64b94913a7b40bb5" } headers = self._headers({'X-Tenant-Id': TENANT1}) data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) # Add location with invalid validation_data (without os_hash_algo) url = 
'http://127.0.0.1:%s/store1/foo_image' % self.http_port0 with requests.get(url) as r: expect_h = str(hashlib.sha512(r.content).hexdigest()) validation_data = {'os_hash_value': expect_h} data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) # Add location with invalid validation_data & # (invalid hash_algo) validation_data = { 'os_hash_algo': 'sha123', 'os_hash_value': expect_h} data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) # Add location with invalid validation_data # (mismatch hash_value with hash algo) with requests.get(url) as r: expect_h = str(hashlib.sha256(r.content).hexdigest()) validation_data = { 'os_hash_algo': 'sha512', 'os_hash_value': expect_h} data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.BAD_REQUEST, response.status_code, response.text) def test_add_location_with_do_secure_hash_true(self): self._setup_multiple_stores() # Create an image path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) # Add location with os_hash_algo other than sha512 path = '/v2/images/%s/locations' % image_id headers = self._headers({'X-Tenant-Id': TENANT1}) url = 'http://127.0.0.1:%s/store1/foo_image' % self.http_port0 with requests.get(url) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha256(r.content).hexdigest()) validation_data = { 'os_hash_algo': 'sha256', 'os_hash_value': expect_h} data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.ACCEPTED, response.status_code, response.text) path = '/v2/images/%s' % image_id func_utils.wait_for_image_checksum_and_status(self, image_id, status='active', max_sec=10, delay_sec=0.2, start_delay_sec=1, multistore=True) # Show Image path = '/v2/images/%s' % image_id resp = self.api_get(path, headers=self._headers()) image = jsonutils.loads(resp.text) self.assertEqual(expect_c, image['checksum']) self.assertEqual(expect_h, image['os_hash_value']) # Add location with valid validation data # os_hash_algo value sha512 # Create an image 3 path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) path = '/v2/images/%s/locations' % image_id headers = self._headers({'X-Tenant-Id': TENANT1}) url = 'http://127.0.0.1:%s/store2/foo_image' % self.http_port0 with requests.get(url) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) validation_data = { 
'os_hash_algo': 'sha512', 'os_hash_value': expect_h} headers = self._headers({'X-Tenant-Id': TENANT1}) data = {'url': url, 'validation_data': validation_data} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.ACCEPTED, response.status_code, response.text) # Show Image path = '/v2/images/%s' % image_id resp = self.api_get(path, headers=self._headers()) output = jsonutils.loads(resp.text) self.assertEqual('queued', output['status']) path = '/v2/images/%s' % image_id func_utils.wait_for_image_checksum_and_status(self, image_id, status='active', max_sec=10, delay_sec=0.2, start_delay_sec=1, multistore=True) # Show Image path = '/v2/images/%s' % image_id resp = self.api_get(path, headers=self._headers()) image = jsonutils.loads(resp.text) self.assertEqual(expect_c, image['checksum']) self.assertEqual(expect_h, image['os_hash_value']) # Add Location with valid URL and do_secure_hash = True # without validation_data # Create an image 4 path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) path = '/v2/images/%s/locations' % image_id headers = self._headers({'X-Tenant-Id': TENANT1}) url = 'http://127.0.0.1:%s/store2/foo_image' % self.http_port0 with requests.get(url) as r: expect_c = str(md5(r.content, usedforsecurity=False).hexdigest()) expect_h = str(hashlib.sha512(r.content).hexdigest()) data = {'url': url} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.ACCEPTED, response.status_code, response.text) path = '/v2/images/%s' % image_id func_utils.wait_for_image_checksum_and_status(self, image_id, status='active', max_sec=10, delay_sec=0.2, start_delay_sec=1, multistore=True) # Show Image path = '/v2/images/%s' % image_id resp = self.api_get(path, headers=self._headers()) image = jsonutils.loads(resp.text) self.assertEqual(expect_c, image['checksum']) self.assertEqual(expect_h, image['os_hash_value']) def test_get_location(self): self._setup_multiple_stores() # Create an image path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) data = {'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'} response = self.api_post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) # Get location of `queued` image headers = self._headers({'X-Roles': 'service'}) path = '/v2/images/%s/locations' % image_id response = self.api_get(path, headers=headers) self.assertEqual(200, response.status_code, response.text) self.assertEqual(0, len(jsonutils.loads(response.text))) # Get location of invalid image image_id = str(uuid.uuid4()) path = '/v2/images/%s/locations' % image_id response = self.api_get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code, response.text) # Add Location with valid URL and image owner image_id = image['id'] path = '/v2/images/%s/locations' % image_id url = 'http://127.0.0.1:%s/store1/foo_image' % self.http_port0 data = {'url': url} response = self.api_post(path, headers=headers, json=data) 
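        # (With the default do_secure_hash=True, the location add above
        # is merely accepted with 202 -- asserted below -- and the hash
        # calculation runs asynchronously: the image stays 'queued' until
        # checksum and os_hash_value are populated, which is why the
        # earlier tests in this class poll with
        # wait_for_image_checksum_and_status before asserting hashes.)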
        self.assertEqual(202, response.status_code, response.text)
        path = '/v2/images/%s' % image_id
        headers = self._headers({'content-type': 'application/json'})
        func_utils.wait_for_status(self, request_path=path,
                                   request_headers=headers,
                                   status='active',
                                   max_sec=10,
                                   delay_sec=0.2,
                                   start_delay_sec=1,
                                   multistore=True)

        # Get Locations not allowed for any other user
        headers = self._headers({'X-Roles': 'admin,member'})
        path = '/v2/images/%s/locations' % image_id
        response = self.api_get(path, headers=headers)
        self.assertEqual(http.FORBIDDEN, response.status_code,
                         response.text)

        # Get Locations allowed only for service user
        headers = self._headers({'X-Roles': 'service'})
        path = '/v2/images/%s/locations' % image_id
        response = self.api_get(path, headers=headers)
        self.assertEqual(200, response.status_code, response.text)
        output = jsonutils.loads(response.text)
        self.assertEqual(url, output[0]['url'])


glance-29.0.0/glance/tests/functional/v2/test_images_api_policy.py

# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import oslo_policy.policy
from oslo_utils.fixture import uuidsentinel as uuids

from glance.api import policy
from glance.tests import functional


class TestImagesPolicy(functional.SynchronousAPIBase):
    def setUp(self):
        super(TestImagesPolicy, self).setUp()
        self.policy = policy.Enforcer(suppress_deprecation_warnings=True)

    def set_policy_rules(self, rules):
        self.policy.set_rules(
            oslo_policy.policy.Rules.from_dict(rules),
            overwrite=True)

    def start_server(self):
        with mock.patch.object(policy, 'Enforcer') as mock_enf:
            mock_enf.return_value = self.policy
            super(TestImagesPolicy, self).start_server()

    def test_image_update_basic(self):
        self.start_server()
        image_id = self._create_and_upload()

        # First make sure image update works with the default policy
        resp = self.api_patch('/v2/images/%s' % image_id,
                              {'op': 'add',
                               'path': '/mykey1',
                               'value': 'foo'})
        self.assertEqual(200, resp.status_code, resp.text)
        self.assertEqual(
            'foo',
            self.api_get('/v2/images/%s' % image_id).json['mykey1'])

        # Now disable modify_image permissions and make sure any other
        # attempts fail
        self.set_policy_rules({'get_image': '',
                               'modify_image': '!'})

        # Add should fail
        resp = self.api_patch('/v2/images/%s' % image_id,
                              {'op': 'add',
                               'path': '/mykey2',
                               'value': 'foo'})
        self.assertEqual(403, resp.status_code)
        self.assertNotIn(
            'mykey2',
            self.api_get('/v2/images/%s' % image_id).json)

        # Replace should fail, old value should persist
        resp = self.api_patch('/v2/images/%s' % image_id,
                              {'op': 'replace',
                               'path': '/mykey1',
                               'value': 'bar'})
        self.assertEqual(403, resp.status_code)
        self.assertEqual(
            'foo',
            self.api_get('/v2/images/%s' % image_id).json['mykey1'])

        # Remove should fail, old value should persist
        resp = self.api_patch('/v2/images/%s' % image_id,
                              {'op': 'remove',
                               'path': '/mykey1'})
self.assertEqual(403, resp.status_code) self.assertEqual( 'foo', self.api_get('/v2/images/%s' % image_id).json['mykey1']) # Now disable get_image permissions and we should get a 404 # instead of a 403 when trying to do the same operation as above. # Remove should fail, old value should persist self.set_policy_rules({'get_image': '!', 'modify_image': '!'}) resp = self.api_patch('/v2/images/%s' % image_id, {'op': 'remove', 'path': '/mykey1'}) self.assertEqual(404, resp.status_code) @mock.patch('glance.location._check_image_location', new=lambda *a: 0) @mock.patch('glance.location.ImageRepoProxy._set_acls', new=lambda *a: 0) def test_image_update_locations(self): self.config(show_multiple_locations=True) self.start_server() image_id = self._create_and_upload() # First make sure we can add and delete locations resp = self.api_patch('/v2/images/%s' % image_id, {'op': 'add', 'path': '/locations/0', 'value': {'url': 'http://foo.bar', 'metadata': {}}}) self.assertEqual(200, resp.status_code, resp.text) self.assertEqual(2, len(self.api_get( '/v2/images/%s' % image_id).json['locations'])) self.assertEqual( 'http://foo.bar', self.api_get( '/v2/images/%s' % image_id).json['locations'][1]['url']) resp = self.api_patch('/v2/images/%s' % image_id, {'op': 'remove', 'path': '/locations/0'}) self.assertEqual(200, resp.status_code, resp.text) self.assertEqual(1, len(self.api_get( '/v2/images/%s' % image_id).json['locations'])) # Add another while we still can so we can try to delete it below resp = self.api_patch('/v2/images/%s' % image_id, {'op': 'add', 'path': '/locations/0', 'value': {'url': 'http://foo.baz', 'metadata': {}}}) self.assertEqual(200, resp.status_code, resp.text) self.assertEqual(2, len(self.api_get( '/v2/images/%s' % image_id).json['locations'])) # Now disable set/delete_image_location permissions and make # sure any other attempts fail self.set_policy_rules({'get_image': '', 'get_image_location': '', 'set_image_location': '!', 'delete_image_location': '!'}) # Make sure we cannot delete the above or add another resp = self.api_patch('/v2/images/%s' % image_id, {'op': 'remove', 'path': '/locations/0'}) self.assertEqual(403, resp.status_code, resp.text) self.assertEqual(2, len(self.api_get( '/v2/images/%s' % image_id).json['locations'])) resp = self.api_patch('/v2/images/%s' % image_id, {'op': 'add', 'path': '/locations/0', 'value': {'url': 'http://foo.baz', 'metadata': {}}}) self.assertEqual(403, resp.status_code, resp.text) self.assertEqual(2, len(self.api_get( '/v2/images/%s' % image_id).json['locations'])) def test_image_get(self): self.start_server() image_id = self._create_and_upload() # Make sure we can get the image image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual(image_id, image['id']) # Make sure we can list the image images = self.api_get('/v2/images').json['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Now disable get_images but allow get_image self.set_policy_rules({'get_images': '!', 'get_image': ''}) # We should not be able to list, but still fetch the image by id resp = self.api_get('/v2/images') self.assertEqual(403, resp.status_code) image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual(image_id, image['id']) # Now disable get_image but allow get_images self.set_policy_rules({'get_images': '', 'get_image': '!'}) # We should be able to list, but not actually see the image in the list images = self.api_get('/v2/images').json['images'] self.assertEqual(0, len(images)) resp = 
self.api_get('/v2/images/%s' % image_id) self.assertEqual(404, resp.status_code) # Now disable both get_image and get_images self.set_policy_rules({'get_images': '!', 'get_image': '!'}) # We should not be able to list or fetch by id resp = self.api_get('/v2/images') self.assertEqual(403, resp.status_code) resp = self.api_get('/v2/images/%s' % image_id) self.assertEqual(404, resp.status_code) def test_image_create(self): self.start_server() # Make sure we can create an image self.assertEqual(201, self._create().status_code) # Now disable add_image and make sure we get 403 self.set_policy_rules({'add_image': '!'}) self.assertEqual(403, self._create().status_code) def test_image_create_by_another(self): self.start_server() # NOTE(danms): There is no policy override in this test, # specifically to test that the defaults (for rbac and # non-rbac) properly catch the attempt by a non-admin to # create an image owned by someone else. image = {'name': 'foo', 'container_format': 'bare', 'disk_format': 'raw', 'owner': 'someoneelse'} resp = self.api_post('/v2/images', json=image, headers={'X-Roles': 'member'}) # Make sure we get the expected owner-specific error message self.assertIn("You are not permitted to create images " "owned by 'someoneelse'", resp.text) def test_image_delete(self): self.start_server() image_id = self._create_and_upload() # Make sure we can delete the image resp = self.api_delete('/v2/images/%s' % image_id) self.assertEqual(204, resp.status_code) # Make sure it is really gone resp = self.api_get('/v2/images/%s' % image_id) self.assertEqual(404, resp.status_code) # Make sure we get a 404 trying to delete a non-existent image resp = self.api_delete('/v2/images/%s' % image_id) self.assertEqual(404, resp.status_code) image_id = self._create_and_upload() # Now disable delete permissions, but allow get_image self.set_policy_rules({'get_image': '', 'delete_image': '!'}) # Make sure delete returns 403 because we can see the image, # just not delete it resp = self.api_delete('/v2/images/%s' % image_id) self.assertEqual(403, resp.status_code) # Now disable delete permissions, including get_image self.set_policy_rules({'get_image': '!', 'delete_image': '!'}) # Make sure delete returns 404 because we can not see nor # delete it resp = self.api_delete('/v2/images/%s' % image_id) self.assertEqual(404, resp.status_code) # Now allow delete, but disallow get_image, just to prove that # you do not need get_image in order to be granted delete, and # that we only use it for error code determination if # permission is denied. self.set_policy_rules({'get_image': '!', 'delete_image': ''}) # Make sure delete returns 204 because even though we can not # see the image, we can delete it resp = self.api_delete('/v2/images/%s' % image_id) self.assertEqual(204, resp.status_code) def test_image_upload(self): self.start_server() # Make sure we can upload the image self._create_and_upload(expected_code=204) # Now disable upload permissions, but allow get_image self.set_policy_rules({ 'add_image': '', 'get_image': '', 'upload_image': '!' 
}) # Make sure upload returns 403 because we can see the image, # just not upload data to it self._create_and_upload(expected_code=403) # Now disable upload permissions, including get_image self.set_policy_rules({ 'add_image': '', 'get_image': '!', 'upload_image': '!', }) # Make sure upload returns 404 because we can not see nor # upload data to it self._create_and_upload(expected_code=404) # Now allow upload, but disallow get_image, just to prove that # you do not need get_image in order to be granted upload, and # that we only use it for error code determination if # permission is denied. self.set_policy_rules({ 'add_image': '', 'get_image': '!', 'upload_image': ''}) # Make sure upload returns 204 because even though we can not # see the image, we can upload data to it self._create_and_upload(expected_code=204) def test_image_download(self): # NOTE(abhishekk): These tests are running without cache middleware self.start_server() image_id = self._create_and_upload() # First make sure we can download image path = '/v2/images/%s/file' % image_id response = self.api_get(path) self.assertEqual(200, response.status_code) self.assertEqual('IMAGEDATA', response.text) # Now disable download permissions, but allow get_image self.set_policy_rules({ 'get_image': '', 'download_image': '!' }) # Make sure download returns 403 because we can see the image, # just not download it response = self.api_get(path) self.assertEqual(403, response.status_code) # Now disable download permissions, including get_image self.set_policy_rules({ 'get_image': '!', 'download_image': '!', }) # Make sure download returns 404 because we can not see nor # download it response = self.api_get(path) self.assertEqual(404, response.status_code) # Now allow download, but disallow get_image, just to prove that # you do not need get_image in order to be granted download, and # that we only use it for error code determination if # permission is denied. self.set_policy_rules({ 'get_image': '!', 'download_image': ''}) # Make sure download returns 200 because even though we can not # see the image, we can download it response = self.api_get(path) self.assertEqual(200, response.status_code) self.assertEqual('IMAGEDATA', response.text) def test_image_stage(self): self.start_server() # First make sure we can perform staging operation self._create_and_stage(expected_code=204) # Now disable get_image permissions, but allow modify_image # should return 204 as well, means even if we can not see # image details, we can stage data for it. self.set_policy_rules({ 'get_image': '!', 'modify_image': '', 'add_image': '' }) self._create_and_stage(expected_code=204) # Now allow get_image and disable modify_image should return 403 self.set_policy_rules({ 'get_image': '', 'modify_image': '!', 'add_image': '' }) self._create_and_stage(expected_code=403) # Now disabling both permissions will return 404 self.set_policy_rules({ 'get_image': '!', 'modify_image': '!', 'add_image': '' }) self._create_and_stage(expected_code=404) # create shared visibility image and stage by 2nd project should # return 404 until it is actually shared with that project. 
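        # (A note on the rule strings used throughout these policy
        # tests: in oslo.policy an empty string '' is a rule that always
        # passes, '!' is a rule that never passes, and checks such as
        # 'role:admin' match against the roles in the request context.)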
self.set_policy_rules({ 'get_image': '', 'modify_image': '!', 'add_image': '', 'add_member': '' }) resp = self.api_post('/v2/images', json={'name': 'foo', 'container_format': 'bare', 'disk_format': 'raw', 'visibility': 'shared'}) self.assertEqual(201, resp.status_code, resp.text) image = resp.json # Now stage data using another project details headers = self._headers({ 'X-Project-Id': 'fake-tenant-id', 'Content-Type': 'application/octet-stream' }) resp = self.api_put( '/v2/images/%s/stage' % image['id'], headers=headers, data=b'IMAGEDATA') self.assertEqual(404, resp.status_code) # Now share image with another project and then staging # data by that project should return 403 path = '/v2/images/%s/members' % image['id'] data = { 'member': uuids.random_member } response = self.api_post(path, json=data) member = response.json self.assertEqual(200, response.status_code) self.assertEqual(image['id'], member['image_id']) # Now stage data using another project details headers = self._headers({ 'X-Project-Id': uuids.random_member, 'X-Roles': 'member', 'Content-Type': 'application/octet-stream' }) resp = self.api_put( '/v2/images/%s/stage' % image['id'], headers=headers, data=b'IMAGEDATA') self.assertEqual(403, resp.status_code) def test_image_deactivate(self): self.start_server() image_id = self._create_and_upload() # Make sure we can deactivate the image resp = self.api_post('/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(204, resp.status_code) # Make sure it is really deactivated resp = self.api_get('/v2/images/%s' % image_id) self.assertEqual('deactivated', resp.json['status']) # Create another image image_id = self._create_and_upload() # Now disable deactivate permissions, but allow get_image self.set_policy_rules({'get_image': '', 'deactivate': '!'}) # Make sure deactivate returns 403 because we can see the image, # just not deactivate it resp = self.api_post('/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(403, resp.status_code) # Now disable deactivate permissions, including get_image self.set_policy_rules({'get_image': '!', 'deactivate': '!'}) # Make sure deactivate returns 404 because we can not see nor # reactivate it resp = self.api_post('/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(404, resp.status_code) # Now allow deactivate, but disallow get_image, just to prove that # you do not need get_image in order to be granted deactivate, and # that we only use it for error code determination if # permission is denied. 
self.set_policy_rules({'get_image': '!', 'deactivate': ''}) # Make sure deactivate returns 204 because even though we can not # see the image, we can deactivate it resp = self.api_post('/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(204, resp.status_code) # Make sure you can not deactivate image using non-admin role of # different project self.set_policy_rules({ 'get_image': '', 'modify_image': '', 'add_image': '', 'upload_image': '', 'add_member': '', 'deactivate': '', 'publicize_image': '', 'communitize_image': '' }) headers = self._headers({ 'X-Project-Id': 'fake-project-id', 'X-Roles': 'member' }) for visibility in ('community', 'shared', 'private', 'public'): image_id = self._create_and_upload(visibility=visibility) resp = self.api_post( '/v2/images/%s/actions/deactivate' % image_id, headers=headers) # 'shared' image will return 404 until it is not shared with # project accessing it if visibility == 'shared': self.assertEqual(404, resp.status_code) # Now lets share the image and try to deactivate it share_path = '/v2/images/%s/members' % image_id data = { 'member': 'fake-project-id' } response = self.api_post(share_path, json=data) member = response.json self.assertEqual(200, response.status_code) self.assertEqual(image_id, member['image_id']) # Now ensure deactivating image by another tenant will # return 403 resp = self.api_post( '/v2/images/%s/actions/deactivate' % image_id, headers=headers) self.assertEqual(403, resp.status_code) elif visibility == 'private': # private image will also return 404 as it is not visible self.assertEqual(404, resp.status_code) else: # public and community visibility will return 403 self.assertEqual(403, resp.status_code) def test_image_reactivate(self): self.start_server() image_id = self._create_and_upload() # deactivate the image resp = self.api_post('/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(204, resp.status_code) # Make sure it is really deactivated resp = self.api_get('/v2/images/%s' % image_id) self.assertEqual('deactivated', resp.json['status']) # Make sure you can reactivate the image resp = self.api_post('/v2/images/%s/actions/reactivate' % image_id) self.assertEqual(204, resp.status_code) # Make sure it is really reactivated resp = self.api_get('/v2/images/%s' % image_id) self.assertEqual('active', resp.json['status']) # Deactivate it again to test further scenarios resp = self.api_post('/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(204, resp.status_code) # Now disable reactivate permissions, but allow get_image self.set_policy_rules({'get_image': '', 'reactivate': '!'}) # Make sure reactivate returns 403 because we can see the image, # just not reactivate it resp = self.api_post('/v2/images/%s/actions/reactivate' % image_id) self.assertEqual(403, resp.status_code) # Now disable reactivate permissions, including get_image self.set_policy_rules({'get_image': '!', 'reactivate': '!'}) # Make sure reactivate returns 404 because we can not see nor # reactivate it resp = self.api_post('/v2/images/%s/actions/reactivate' % image_id) self.assertEqual(404, resp.status_code) # Now allow reactivate, but disallow get_image, just to prove that # you do not need get_image in order to be granted reactivate, and # that we only use it for error code determination if # permission is denied. 
self.set_policy_rules({'get_image': '!', 'reactivate': ''}) # Make sure reactivate returns 204 because even though we can not # see the image, we can reactivate it resp = self.api_post('/v2/images/%s/actions/reactivate' % image_id) self.assertEqual(204, resp.status_code) # Make sure you can not reactivate image using non-admin role of # different project self.set_policy_rules({ 'get_image': '', 'modify_image': '', 'add_image': '', 'upload_image': '', 'add_member': '', 'deactivate': '', 'reactivate': '', 'publicize_image': '', 'communitize_image': '' }) headers = self._headers({ 'X-Project-Id': 'fake-project-id', 'X-Roles': 'member' }) for visibility in ('public', 'community', 'shared', 'private'): image_id = self._create_and_upload(visibility=visibility) # deactivate the image resp = self.api_post( '/v2/images/%s/actions/deactivate' % image_id) self.assertEqual(204, resp.status_code) # try to reactivate the image resp = self.api_post( '/v2/images/%s/actions/reactivate' % image_id, headers=headers) # A 'shared' image will return 404 until it is shared with the # project accessing it if visibility == 'shared': self.assertEqual(404, resp.status_code) # Now let's share the image and try to reactivate it share_path = '/v2/images/%s/members' % image_id data = { 'member': 'fake-project-id' } response = self.api_post(share_path, json=data) member = response.json self.assertEqual(200, response.status_code) self.assertEqual(image_id, member['image_id']) # Now ensure reactivating the image by another tenant will # return 403 resp = self.api_post( '/v2/images/%s/actions/reactivate' % image_id, headers=headers) self.assertEqual(403, resp.status_code) elif visibility == 'private': # private image will also return 404 as it is not visible self.assertEqual(404, resp.status_code) else: # public and community visibility will return 403 self.assertEqual(403, resp.status_code) def test_delete_from_store(self): self.start_server() # First create image in multiple stores image_id = self._create_and_import(stores=['store1', 'store2', 'store3']) # Make sure we are able to delete image from the specific store path = "/v2/stores/store1/%s" % image_id response = self.api_delete(path) self.assertEqual(204, response.status_code) # Disable get_image_location and verify you will get 403 self.set_policy_rules({ 'get_image': '', 'delete_image_location': '', 'get_image_location': '!' }) path = "/v2/stores/store2/%s" % image_id response = self.api_delete(path) self.assertEqual(403, response.status_code) # Disable delete_image_location and verify you will get 403 self.set_policy_rules({ 'get_image': '', 'delete_image_location': '!', 'get_image_location': '' }) path = "/v2/stores/store2/%s" % image_id response = self.api_delete(path) self.assertEqual(403, response.status_code) # Disabling all, you will get 404 self.set_policy_rules({ 'get_image': '!', 'delete_image_location': '!', 'get_image_location': '!' }) path = "/v2/stores/store2/%s" % image_id response = self.api_delete(path) self.assertEqual(404, response.status_code) # Now allow delete_image_location and get_image_location, but disallow # get_image, just to prove that you do not need get_image in order # to be granted deletion of an image from a particular store, and # that we only use it for error code determination if # permission is denied.
self.set_policy_rules({ 'get_image': '!', 'delete_image_location': '', 'get_image_location': '' }) path = "/v2/stores/store2/%s" % image_id response = self.api_delete(path) self.assertEqual(204, response.status_code) # Deleting the image from a store as a non-admin will get 403 self.set_policy_rules({ 'get_image': '', 'delete_image_location': '', 'get_image_location': '' }) headers = self._headers({ 'X-Roles': 'member' }) path = "/v2/stores/store2/%s" % image_id response = self.api_delete(path, headers=headers) self.assertEqual(403, response.status_code) def test_copy_image(self): self.start_server() # create image using import image_id = self._create_and_import( stores=['store1'], visibility='public') # Make sure you can copy image to another store self.set_policy_rules({ 'copy_image': 'role:admin', 'get_image': '', 'modify_image': '' }) store_to_copy = ["store2"] response = self._import_copy(image_id, store_to_copy) self.assertEqual(202, response.status_code) self._wait_for_import(image_id) self.assertEqual('success', self._get_latest_task(image_id)['status']) # Now disable copy image and verify you will get 403 Forbidden store_to_copy = ["store3"] self.set_policy_rules({ 'copy_image': '!', 'get_image': '', 'modify_image': '' }) response = self._import_copy(image_id, store_to_copy) self.assertEqual(403, response.status_code) # Verify that non-admin but member of same project can not copy image self.set_policy_rules({ 'copy_image': 'role:admin', 'get_image': '', 'modify_image': '' }) headers = self._headers({'X-Roles': 'member'}) response = self._import_copy(image_id, store_to_copy, headers=headers) self.assertEqual(403, response.status_code) # Verify that non-owner can not copy image self.set_policy_rules({ 'copy_image': 'role:admin', 'get_image': '', 'modify_image': '' }) headers = self._headers({ 'X-Roles': 'member', 'X-Project-Id': 'fake-project-id' }) response = self._import_copy(image_id, store_to_copy, headers=headers) self.assertEqual(403, response.status_code) # Now disable copy image and get_image and verify you will get # 404 NotFound self.set_policy_rules({ 'copy_image': '!', 'get_image': '!', 'modify_image': '' }) store_to_copy = ["store3"] response = self._import_copy(image_id, store_to_copy) self.assertEqual(404, response.status_code) def test_import_glance_direct(self): self.start_server() # create image and stage data to it image_id = self._create_and_stage(visibility='public') # Make sure you can import using glance-direct self.set_policy_rules({ 'get_image': '', 'communitize_image': '', 'add_image': '', 'modify_image': '' }) store_to_import = ['store1'] response = self._import_direct(image_id, store_to_import) self.assertEqual(202, response.status_code) self._wait_for_import(image_id) self.assertEqual('success', self._get_latest_task(image_id)['status']) # Make sure you can import data to image using non-admin role image_id = self._create_and_stage(visibility='community') headers = self._headers({'X-Roles': 'member'}) response = self._import_direct(image_id, store_to_import, headers=headers) self.assertEqual(202, response.status_code) self._wait_for_import(image_id) self.assertEqual('success', self._get_latest_task(image_id)['status']) # Make sure you can not import data to image using non-admin role of # different project image_id = self._create_and_stage(visibility='community') # Make sure you will get 403 Forbidden self.set_policy_rules({ 'get_image': '', 'modify_image': '!'
}) headers = self._headers({ 'X-Roles': 'member', 'X-Project-Id': 'fake-project-id' }) response = self._import_direct(image_id, store_to_import, headers=headers) self.assertEqual(403, response.status_code) # Disabling both get_image and modify_image should return 404 NotFound self.set_policy_rules({ 'get_image': '!', 'modify_image': '!' }) headers = self._headers({ 'X-Roles': 'member', 'X-Project-Id': 'fake-project-id' }) response = self._import_direct(image_id, store_to_import, headers=headers) self.assertEqual(404, response.status_code) def _test_image_ownership(self, headers, method): self.set_policy_rules({ 'get_image': '', 'add_image': '', 'publicize_image': '', 'communitize_image': '', 'add_member': '', }) for visibility in ('community', 'public', 'shared'): path = "/v2/images" data = { 'name': '%s-image' % visibility, 'visibility': visibility, } # create image response = self.api_post(path, json=data) image = response.json self.assertEqual(201, response.status_code) self.assertEqual(visibility, image['visibility']) # share the image if visibility is shared if visibility == 'shared': path = '/v2/images/%s/members' % image['id'] data = { 'member': 'fake-project-id' } response = self.api_post(path, json=data) self.assertEqual(200, response.status_code) # Add/Delete tag path = '/v2/images/%s/tags/Test_Tag_2' % image['id'] response = self.api_request(method, path, headers=headers) self.assertEqual(403, response.status_code) def test_image_tag_update(self): self.start_server() # Create image image_id = self._create_and_upload() # Make sure we will be able to add tags for the image path = '/v2/images/%s/tags/Test_Tag' % image_id response = self.api_put(path) self.assertEqual(204, response.status_code) # Make sure tag is added to image path = '/v2/images/%s' % image_id response = self.api_get(path) image = response.json self.assertEqual(['Test_Tag'], image['tags']) # Disabling get_image and modify_image should give us 404 Not Found self.set_policy_rules({ 'get_image': '!', 'modify_image': '!' }) path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_put(path) self.assertEqual(404, response.status_code) # Allowing get_image but disabling modify_image should give us # 403 Forbidden self.set_policy_rules({ 'get_image': '', 'modify_image': '!' }) path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_put(path) self.assertEqual(403, response.status_code) # Adding tag by another project (non-admin user) should return # 404 Not Found for private image self.set_policy_rules({ 'get_image': '', 'modify_image': '' }) # Note for other reviewers, these tests run by default using the # admin role; to test this scenario we need an image # of the current project to be accessed by another project's # non-admin user.
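# (The X-Project-Id and X-Roles headers below are how these synchronous
# functional tests simulate a request from a different, non-admin
# project without a real identity service in the loop.)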
headers = self._headers({ 'X-Project-Id': 'fake-project-id', 'X-Roles': 'member', }) path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_put(path, headers=headers) self.assertEqual(404, response.status_code) # Adding tag by another project (non-admin user) should return # 403 Forbidden for images other than private self._test_image_ownership(headers, 'PUT') def test_image_tag_delete(self): self.start_server() # Create image image_id = self._create_and_upload() # Make sure we will be able to add tags for the image path = '/v2/images/%s/tags/Test_Tag_1' % image_id response = self.api_put(path) self.assertEqual(204, response.status_code) # add another tag while we can path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_put(path) self.assertEqual(204, response.status_code) # Make sure tags are added to image path = '/v2/images/%s' % image_id response = self.api_get(path) image = response.json self.assertCountEqual(['Test_Tag_1', 'Test_Tag_2'], image['tags']) # Now delete tag from image path = '/v2/images/%s/tags/Test_Tag_1' % image_id response = self.api_delete(path) self.assertEqual(204, response.status_code) # Make sure tag is deleted path = '/v2/images/%s' % image_id response = self.api_get(path) image = response.json self.assertNotIn('Test_Tag_1', image['tags']) # Disabling get_image and modify_image should give us 404 Not Found self.set_policy_rules({ 'get_image': '!', 'modify_image': '!' }) path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_delete(path) self.assertEqual(404, response.status_code) # Allowing get_image but disabling modify_image should give us # 403 Forbidden self.set_policy_rules({ 'get_image': '', 'modify_image': '!' }) path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_delete(path) self.assertEqual(403, response.status_code) # Deleting tag by another project (non-admin user) should return # 404 Not Found for private image self.set_policy_rules({ 'get_image': '', 'modify_image': '' }) # Note for other reviewers, these tests run by default using the # admin role; to test this scenario we need an image # of the current project to be accessed by another project's # non-admin user. headers = self._headers({ 'X-Project-Id': 'fake-project-id', 'X-Roles': 'member', }) path = '/v2/images/%s/tags/Test_Tag_2' % image_id response = self.api_delete(path, headers=headers) self.assertEqual(404, response.status_code) # Deleting tag by another project (non-admin user) should return # 403 Forbidden for images other than private self._test_image_ownership(headers, 'DELETE') def test_get_task_info(self): self.start_server() image_id = self._create_and_import( stores=['store1'], visibility='public') # Make sure you can get task information of that image path = '/v2/images/%s/tasks' % image_id response = self.api_get(path) self.assertEqual(200, response.status_code) # Disabling get_image should give us 404 Not Found self.set_policy_rules({'get_image': '!'}) path = '/v2/images/%s/tasks' % image_id response = self.api_get(path) self.assertEqual(404, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_images_import_locking.py0000664000175000017500000002160700000000000026126 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from testtools import content as ttc import time from unittest import mock import uuid from oslo_log import log as logging from oslo_utils import fixture as time_fixture from oslo_utils import units from glance.tests import functional from glance.tests import utils as test_utils LOG = logging.getLogger(__name__) class TestImageImportLocking(functional.SynchronousAPIBase): def _get_image_import_task(self, image_id, task_id=None): if task_id is None: image = self.api_get('/v2/images/%s' % image_id).json task_id = image['os_glance_import_task'] return self.api_get('/v2/tasks/%s' % task_id).json def _test_import_copy(self, warp_time=False): self.start_server() state = {'want_run': True} # Create and import an image with no pipeline stall image_id = self._create_and_import(stores=['store1']) # Set up a fake data pipeline that will stall until we are ready # to unblock it def slow_fake_set_data(data_iter, size=None, backend=None, set_active=True): me = str(uuid.uuid4()) while state['want_run']: LOG.info('fake_set_data running %s', me) state['running'] = True time.sleep(0.1) LOG.info('fake_set_data ended %s', me) # Constrain oslo timeutils time so we can manipulate it tf = time_fixture.TimeFixture() self.useFixture(tf) # Turn on the delayed data pipeline and start a copy-image # import which will hang out for a while with mock.patch('glance.location.ImageProxy.set_data') as mock_sd: mock_sd.side_effect = slow_fake_set_data resp = self._import_copy(image_id, ['store2']) self.addDetail('First import response', ttc.text_content(str(resp))) self.assertEqual(202, resp.status_code) # Wait to make sure the data stream gets started for i in range(0, 10): if 'running' in state: break time.sleep(0.1) # Make sure the first import got to the point where the # hanging loop will hold it in processing state self.assertTrue(state.get('running', False), 'slow_fake_set_data() never ran') # Make sure the task is available and in the right state first_import_task = self._get_image_import_task(image_id) self.assertEqual('processing', first_import_task['status']) # If we're warping time, then advance the clock by two hours if warp_time: tf.advance_time_delta(datetime.timedelta(hours=2)) # Try a second copy-image import. If we are warping time, # expect the lock to be busted. If not, then we should get # a 409 Conflict.
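# (The two-hour jump is simply a value that should comfortably exceed
# whatever staleness window the import lock uses, so the second import
# is entitled to preempt the first.)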
resp = self._import_copy(image_id, ['store3']) time.sleep(0.1) self.addDetail('Second import response', ttc.text_content(str(resp))) if warp_time: self.assertEqual(202, resp.status_code) else: self.assertEqual(409, resp.status_code) self.addDetail('First task', ttc.text_content(str(first_import_task))) # Grab the current import task for our image, and also # refresh our first task object second_import_task = self._get_image_import_task(image_id) first_import_task = self._get_image_import_task( image_id, first_import_task['id']) if warp_time: # If we warped time and busted the lock, then we expect the # current task to be different than the original task self.assertNotEqual(first_import_task['id'], second_import_task['id']) # The original task should be failed with the expected message self.assertEqual('failure', first_import_task['status']) self.assertEqual('Expired lock preempted', first_import_task['message']) # The new task should be off and running self.assertEqual('processing', second_import_task['status']) else: # We didn't bust the lock, so we didn't start another # task, so confirm it hasn't changed self.assertEqual(first_import_task['id'], second_import_task['id']) return image_id, state def test_import_copy_locked(self): self._test_import_copy(warp_time=False) def test_import_copy_bust_lock(self): image_id, state = self._test_import_copy(warp_time=True) # After the import has busted the lock, wait for our # new import to start. We used a different store than # the stalled task so we can tell the difference. for i in range(0, 10): image = self.api_get('/v2/images/%s' % image_id).json if image['stores'] == 'store1,store3': break time.sleep(0.1) # After completion, we expect store1 (original) and store3 (new) # and that the other task is still stuck importing image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual('store1,store3', image['stores']) self.assertEqual('', image['os_glance_failed_import']) # Free up the stalled task and give eventlet time to let it # play out the rest of the task state['want_run'] = False for i in range(0, 10): image = self.api_get('/v2/images/%s' % image_id).json time.sleep(0.1) # After that, we expect everything to be cleaned up and in the # terminal state that we expect. image = self.api_get('/v2/images/%s' % image_id).json self.assertEqual('', image.get('os_glance_import_task', '')) self.assertEqual('', image['os_glance_importing_to_stores']) self.assertEqual('', image['os_glance_failed_import']) self.assertEqual('store1,store3', image['stores']) @mock.patch('oslo_utils.timeutils.StopWatch.expired', new=lambda x: True) def test_import_task_status(self): self.start_server() # Generate 3 MiB of data for the image, enough to get a few # status messages limit = 3 * units.Mi image_id = self._create_and_stage(data_iter=test_utils.FakeData(limit)) # This utility function will grab the current task status at # any time and stash it into a list of statuses if it finds a # new one statuses = [] def grab_task_status(): image = self.api_get('/v2/images/%s' % image_id).json task_id = image['os_glance_import_task'] task = self.api_get('/v2/tasks/%s' % task_id).json msg = task['message'] if msg not in statuses: statuses.append(msg) # This is the only real thing we have mocked out, which is the # "upload this to glance_store" part, which we override so we # can control the block size and check our task status # synchronously and not depend on timers. It just reads the # source data in 64KiB chunks and throws it away. 
def fake_upload(data, *a, **k): while True: grab_task_status() if not data.read(65536): break time.sleep(0.1) with mock.patch('glance.location.ImageProxy._upload_to_store') as mu: mu.side_effect = fake_upload # Start the import... resp = self._import_direct(image_id, ['store2']) self.assertEqual(202, resp.status_code) # ...and wait until it finishes for i in range(0, 100): image = self.api_get('/v2/images/%s' % image_id).json if not image.get('os_glance_import_task'): break time.sleep(0.1) # Image should be in active state and we should have gotten a # new message every 1MiB in the process. We mocked StopWatch # to always be expired so that we fire the callback every # time. self.assertEqual('active', image['status']) self.assertEqual(['', 'Copied 0 MiB', 'Copied 1 MiB', 'Copied 2 MiB', 'Copied 3 MiB'], statuses) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_legacy_update_cinder_store.py0000664000175000017500000003130400000000000027122 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from unittest import mock import uuid from cinderclient.v3 import client as cinderclient import glance_store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from glance.common import wsgi from glance.tests import functional # Keeping backward compatibility to support importing from old # path try: from glance_store._drivers.cinder import base from glance_store._drivers.cinder import store as cinder except ImportError: from glance_store._drivers import cinder base = mock.Mock() LOG = logging.getLogger(__name__) CONF = cfg.CONF class TestLegacyUpdateCinderStore(functional.SynchronousAPIBase): def setUp(self): super(TestLegacyUpdateCinderStore, self).setUp() self.vol_id = uuid.uuid4() self.volume = mock.MagicMock( id=self.vol_id, status='available', size=1, multiattach=False, encrypted=False, delete=mock.MagicMock(), update_all_metadata=mock.MagicMock(), update_readonly_flag=mock.MagicMock()) self.volume.manager = mock.MagicMock(get=lambda id: self.volume) self.cinder_store_mock = mock.MagicMock( attachments=mock.MagicMock(), client=mock.MagicMock(), volumes=mock.MagicMock( get=lambda v_id: mock.MagicMock(volume_type='fast'), create=lambda size_gb, name, metadata, volume_type: self.volume)) fake_ip = '127.0.0.1' self.fake_socket_return = [[0, 1, 2, 3, [fake_ip]]] def setup_stores(self): pass def setup_single_store(self): glance_store.register_opts(CONF) self.config(show_multiple_locations=True) self.config(show_image_direct_url=True) self.config(default_store='cinder', group='glance_store') self.config(stores=['http', 'swift', 'cinder'], group='glance_store') self.config(cinder_volume_type='fast', group='glance_store') self.config(cinder_store_user_name='fake_user', group='glance_store') self.config(cinder_store_password='fake_pass', group='glance_store') 
self.config(cinder_store_project_name='fake_project', group='glance_store') self.config(cinder_store_auth_address='http://auth_addr', group='glance_store') glance_store.create_stores(CONF) def unset_single_store(self): glance_store.register_opts(CONF) self.config(show_multiple_locations=True) self.config(show_image_direct_url=True) self.config(stores=[], group='glance_store') self.config(cinder_volume_type='', group='glance_store') self.config(cinder_store_user_name='', group='glance_store') self.config(cinder_store_password='', group='glance_store') self.config(cinder_store_project_name='', group='glance_store') self.config(cinder_store_auth_address='', group='glance_store') glance_store.create_stores(CONF) @mock.patch.object(cinderclient, 'Client') def setup_multiple_stores(self, mock_client): """Configures multiple backend stores. This configures the API with two cinder stores (store1 and store2) as well as a os_glance_staging_store for imports. """ self.config(show_multiple_locations=True) self.config(show_image_direct_url=True) self.config(enabled_backends={'store1': 'cinder', 'store2': 'cinder'}) glance_store.register_store_opts(CONF, reserved_stores=wsgi.RESERVED_STORES) self.config(default_backend='store1', group='glance_store') self.config(cinder_volume_type='fast', group='store1') self.config(cinder_store_user_name='fake_user', group='store1') self.config(cinder_store_password='fake_pass', group='store1') self.config(cinder_store_project_name='fake_project', group='store1') self.config(cinder_store_auth_address='http://auth_addr', group='store1') self.config(cinder_volume_type='reliable', group='store2') self.config(cinder_store_user_name='fake_user', group='store2') self.config(cinder_store_password='fake_pass', group='store2') self.config(cinder_store_project_name='fake_project', group='store2') self.config(cinder_store_auth_address='http://auth_addr', group='store2') self.config(filesystem_store_datadir=self._store_dir('staging'), group='os_glance_staging_store') glance_store.create_multi_stores(CONF, reserved_stores=wsgi.RESERVED_STORES) glance_store.verify_store() def _import_direct(self, image_id, stores): """Do an import of image_id to the given stores.""" body = {'method': {'name': 'glance-direct'}, 'stores': stores, 'all_stores': False} return self.api_post( '/v2/images/%s/import' % image_id, json=body) def _mock_wait_volume_status(self, volume, status_transition, status_expected): volume.status = status_expected return volume @mock.patch.object(base, 'connector') @mock.patch.object(cinderclient, 'Client') @mock.patch.object(cinder.Store, 'temporary_chown') @mock.patch.object(cinder, 'connector') @mock.patch.object(cinder, 'open') @mock.patch('glance_store._drivers.cinder.Store._wait_volume_status') @mock.patch.object(strutils, 'mask_dict_password') @mock.patch.object(socket, 'getaddrinfo') def test_create_image(self, mock_host_addr, mock_mask_pass, mock_wait, mock_open, mock_connector, mock_chown, mocked_cc, mock_base): # setup multiple cinder stores self.setup_multiple_stores() self.start_server() mocked_cc.return_value = self.cinder_store_mock mock_wait.side_effect = self._mock_wait_volume_status mock_host_addr.return_value = self.fake_socket_return # create an image image_id = self._create_and_import(stores=['store1']) image = self.api_get('/v2/images/%s' % image_id).json # verify image is created with new location url self.assertEqual('cinder://store1/%s' % self.vol_id, image['locations'][0]['url']) self.assertEqual('store1', 
image['locations'][0]['metadata']['store']) # NOTE(whoami-rajat): These are internals called by glance_store, so # we want to make sure they got hit, but not be too strict about how. mocked_cc.assert_called() mock_open.assert_called() mock_chown.assert_called() mock_connector.get_connector_properties.assert_called() @mock.patch.object(base, 'connector') @mock.patch.object(cinderclient, 'Client') @mock.patch.object(cinder.Store, 'temporary_chown') @mock.patch.object(cinder, 'connector') @mock.patch.object(cinder, 'open') @mock.patch('glance_store._drivers.cinder.Store._wait_volume_status') @mock.patch.object(strutils, 'mask_dict_password') @mock.patch.object(socket, 'getaddrinfo') def test_migrate_image_after_upgrade(self, mock_host_addr, mock_mask_pass, mock_wait, mock_open, mock_connector, mock_chown, mocked_cc, mock_base): """Test to check if an image is successfully migrated when we upgrade from a single cinder store to multiple cinder stores. """ # setup single cinder store self.setup_single_store() self.start_server() mocked_cc.return_value = self.cinder_store_mock mock_wait.side_effect = self._mock_wait_volume_status mock_host_addr.return_value = self.fake_socket_return # create image in single store image_id = self._create_and_import(stores=['store1']) image = self.api_get('/v2/images/%s' % image_id).json # check the location url is in old format self.assertEqual('cinder://%s' % self.vol_id, image['locations'][0]['url']) self.unset_single_store() # setup multiple cinder stores self.setup_multiple_stores() cinder.keystone_sc = mock.MagicMock() # get the image to run lazy loading image = self.api_get('/v2/images/%s' % image_id).json # verify the image is updated to new format self.assertEqual('cinder://store1/%s' % self.vol_id, image['locations'][0]['url']) self.assertEqual('store1', image['locations'][0]['metadata']['store']) image = self.api_get('/v2/images/%s' % image_id).json # verify the image location url is consistent self.assertEqual('cinder://store1/%s' % self.vol_id, image['locations'][0]['url']) # NOTE(whoami-rajat): These are internals called by glance_store, so # we want to make sure they got hit, but not be too strict about how. mocked_cc.assert_called() mock_open.assert_called() mock_chown.assert_called() mock_connector.get_connector_properties.assert_called() @mock.patch.object(base, 'connector') @mock.patch.object(cinderclient, 'Client') @mock.patch.object(cinder.Store, 'temporary_chown') @mock.patch.object(cinder, 'connector') @mock.patch.object(cinder, 'open') @mock.patch('glance_store._drivers.cinder.Store._wait_volume_status') @mock.patch.object(strutils, 'mask_dict_password') @mock.patch.object(socket, 'getaddrinfo') def test_migrate_image_after_upgrade_not_owner(self, mock_host_addr, mock_mask_pass, mock_wait, mock_open, mock_connector, mock_chown, mocked_cc, mock_base): """Test to check if an image is successfully migrated when we upgrade from a single cinder store to multiple cinder stores, and that GETs from non-owners in the meantime are not interrupted. 
""" # setup single cinder store self.setup_single_store() self.start_server() mocked_cc.return_value = self.cinder_store_mock mock_wait.side_effect = self._mock_wait_volume_status mock_host_addr.return_value = self.fake_socket_return # create image in single store, owned by someone else image_id = self._create_and_import(stores=['store1'], extra={'visibility': 'public', 'owner': 'someoneelse'}) image = self.api_get('/v2/images/%s' % image_id).json # check the location url is in old format self.assertEqual('cinder://%s' % self.vol_id, image['locations'][0]['url']) self.unset_single_store() # setup multiple cinder stores self.setup_multiple_stores() cinder.keystone_sc = mock.MagicMock() # get the image to run lazy loading, but as a non-admin, non-owner resp = self.api_get('/v2/images/%s' % image_id, headers={'X-Roles': 'reader'}) image = resp.json # verify the image is updated to new format self.assertEqual('cinder://store1/%s' % self.vol_id, image['locations'][0]['url']) self.assertEqual('store1', image['locations'][0]['metadata']['store']) image = self.api_get('/v2/images/%s' % image_id).json # verify the image location url is consistent self.assertEqual('cinder://store1/%s' % self.vol_id, image['locations'][0]['url']) # NOTE(whoami-rajat): These are internals called by glance_store, so # we want to make sure they got hit, but not be too strict about how. mocked_cc.assert_called() mock_open.assert_called() mock_chown.assert_called() mock_connector.get_connector_properties.assert_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_member_api_policy.py0000664000175000017500000002225000000000000025233 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import oslo_policy.policy from oslo_utils.fixture import uuidsentinel as uuids from glance.api import policy from glance.tests import functional class TestImageMembersPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestImageMembersPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def load_data(self, share_image=False): output = {} path = "/v2/images" data = { 'name': 'shared-image', 'visibility': 'shared', } response = self.api_post(path, json=data) self.assertEqual(201, response.status_code) image_id = response.json['id'] output['image_id'] = image_id if share_image: path = '/v2/images/%s/members' % image_id data = { 'member': uuids.random_member } response = self.api_post(path, json=data) member = response.json self.assertEqual(200, response.status_code) self.assertEqual(image_id, member['image_id']) self.assertEqual('pending', member['status']) output['member_id'] = member['member_id'] return output def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestImageMembersPolicy, self).start_server() def test_member_add_basic(self): self.start_server() output = self.load_data() path = '/v2/images/%s/members' % output['image_id'] # Make sure we can add member to image (can share image) data = { 'member': uuids.random_member } response = self.api_post(path, json=data) self.assertEqual(200, response.status_code) member = response.json self.assertEqual(output['image_id'], member['image_id']) self.assertEqual('pending', member['status']) # Now disable add permissions self.set_policy_rules({ 'add_member': '!', 'get_image': '@' }) # Make sure add returns 403 response = self.api_post(path, json=data) self.assertEqual(403, response.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'add_member': '!', 'get_image': '!' 
}) # Make sure add returns 404 response = self.api_post(path, json=data) self.assertEqual(404, response.status_code) def test_member_update_basic(self): self.start_server() output = self.load_data(share_image=True) path = '/v2/images/%s/members/%s' % (output['image_id'], output['member_id']) # Make sure we can update image membership data = { 'status': 'accepted' } response = self.api_put(path, json=data) self.assertEqual(200, response.status_code) member = response.json self.assertEqual(output['image_id'], member['image_id']) self.assertEqual('accepted', member['status']) # Now disable modify permissions self.set_policy_rules({ 'modify_member': '!', 'get_image': '@' }) # Make sure update returns 403 response = self.api_put(path, json=data) self.assertEqual(403, response.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'modify_member': '!', 'get_image': '!', 'get_member': '@' }) # The image owner is not allowed to update the membership status, # so pass a different project in the headers headers = self._headers({ 'X-Tenant-Id': 'fake-tenant-id', }) response = self.api_put(path, headers=headers, json=data) self.assertEqual(404, response.status_code) def test_member_list_basic(self): self.start_server() output = self.load_data(share_image=True) path = '/v2/images/%s/members' % (output['image_id']) # Make sure we can list image members response = self.api_get(path) self.assertEqual(200, response.status_code) self.assertEqual(1, len(response.json['members'])) # Now disable list permissions self.set_policy_rules({ 'get_members': '!', 'get_image': '@', }) # Make sure get returns 403 response = self.api_get(path) self.assertEqual(403, response.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_members': '!', 'get_image': '!', }) # Make sure get returns 404 response = self.api_get(path) self.assertEqual(404, response.status_code) # Now enable get_members and disable get_member self.set_policy_rules({ 'get_members': '@', 'get_member': '!', 'get_image': '@', }) # Make sure we get an empty list as get_member is disabled response = self.api_get(path) self.assertEqual(200, response.status_code) self.assertEqual(0, len(response.json['members'])) def test_member_get_basic(self): self.start_server() output = self.load_data(share_image=True) path = '/v2/images/%s/members/%s' % ( output['image_id'], output['member_id']) # Make sure we can get member response = self.api_get(path) self.assertEqual(200, response.status_code) member = response.json self.assertEqual(output['image_id'], member['image_id']) self.assertEqual('pending', member['status']) # Now disable get permissions self.set_policy_rules({'get_member': '!'}) # Make sure get returns 404 as we are not exposing it response = self.api_get(path) self.assertEqual(404, response.status_code) def test_member_delete_basic(self): self.start_server() output = self.load_data(share_image=True) path = '/v2/images/%s/members/%s' % (output['image_id'], output['member_id']) # Make sure we can delete image member response = self.api_delete(path) self.assertEqual(204, response.status_code) # Verify it is deleted response = self.api_get(path) self.assertEqual(404, response.status_code) # Now disable delete permissions and create image # membership again self.set_policy_rules({ 'delete_member': '!', 'add_member': '@', 'get_image': '@' }) add_path = '/v2/images/%s/members' % output['image_id'] data = { 'member': uuids.random_member } response =
self.api_post(add_path, json=data) self.assertEqual(200, response.status_code) # Make sure delete returns 403 response = self.api_delete(path) self.assertEqual(403, response.status_code) # Now disable delete_member and get_image permissions and make # sure you will get 404 Not Found self.set_policy_rules({ 'delete_member': '!', 'get_image': '!', 'get_member': '@' }) response = self.api_delete(path) self.assertEqual(404, response.status_code) def test_image_sharing_not_allowed(self): # This test verifies that an image whose visibility is anything # other than 'shared' cannot be shared self.start_server() path = "/v2/images" for visibility in ('community', 'private', 'public'): data = { 'name': '%s-image' % visibility, 'visibility': visibility, } # create image response = self.api_post(path, json=data) image = response.json self.assertEqual(201, response.status_code) self.assertEqual(visibility, image['visibility']) # Sharing the image should return a 403 response member_path = '/v2/images/%s/members' % image['id'] data = { 'member': uuids.random_member } response = self.api_post(member_path, json=data) self.assertEqual(403, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_namespace_api_policy.py0000664000175000017500000007500600000000000027414 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
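# NOTE: a hedged sketch (an illustrative assumption, not Glance's actual
# implementation) of the namespace visibility gate that shapes the
# 403-vs-404 expectations below: private namespaces are visible only to
# their owning project (or an admin), public namespaces to everyone.
def _namespace_visible_sketch(namespace, project_id, is_admin=False):
    # Public namespaces and admin callers can always see the namespace.
    if namespace.get('visibility') == 'public' or is_admin:
        return True
    # A private namespace is visible solely to the project that owns it.
    return namespace.get('owner') == project_id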
from unittest import mock import oslo_policy.policy from glance.api import policy from glance.tests import functional GLOBAL_NAMESPACE_DATA = { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description", "resource_type_associations": [{ "name": "MyResourceType", "prefix": "prefix_", "properties_target": "temp" }], "objects": [{ "name": "MyObject", "description": "My object for My namespace", "properties": { "test_property": { "title": "test_property", "description": "Test property for My object", "type": "string" }, } }], "tags": [{ "name": "MyTag", }], "properties": { "TestProperty": { "title": "MyTestProperty", "description": "Test Property for My namespace", "type": "string" }, }, } NAME_SPACE1 = { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description" } NAME_SPACE2 = { "namespace": "MySecondNamespace", "display_name": "My User Friendly Namespace", "description": "My description" } class TestMetadefNamespacesPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestMetadefNamespacesPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestMetadefNamespacesPolicy, self).start_server() def _verify_forbidden_converted_to_not_found(self, path, method, json=None): # Note for other reviewers, these tests run by default using the # admin role; to test this scenario we need a private namespace # of the current project to be accessed by another project's # non-admin user. headers = self._headers({ 'X-Tenant-Id': 'fake-tenant-id', 'X-Roles': 'member', }) resp = self.api_request(method, path, headers=headers, json=json) self.assertEqual(404, resp.status_code) def test_namespace_list_basic(self): self.start_server() # First make sure create private namespace works with default policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) # Next make sure create public namespace works with default policy path = '/v2/metadefs/namespaces' NAME_SPACE2["visibility"] = 'public' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE2) self.assertEqual('MySecondNamespace', md_resource['namespace']) # Now make sure 'get_metadef_namespaces' allows the user to get all # the namespaces resp = self.api_get(path) md_resource = resp.json self.assertEqual(2, len(md_resource['namespaces'])) # Now disable get_metadef_namespaces permissions and make sure any # other attempts fail self.set_policy_rules({ 'get_metadef_namespaces': '!', 'get_metadef_namespace': '@', }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) def test_namespace_list_with_resource_types(self): self.start_server() # First make sure create namespace works with default policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=GLOBAL_NAMESPACE_DATA) self.assertEqual('MyNamespace', md_resource['namespace']) # Now make sure 'get_metadef_namespaces' allows the user to get all # the namespaces with associated resource types resp = self.api_get(path) md_resource = resp.json self.assertEqual(1, len(md_resource['namespaces'])) # Verify that response includes associated resource types as well for namespace_obj in
md_resource['namespaces']: self.assertIn('resource_type_associations', namespace_obj) # Now disable list_metadef_resource_types permissions and make sure # you get a forbidden response self.set_policy_rules({ 'get_metadef_namespaces': '@', 'get_metadef_namespace': '@', 'list_metadef_resource_types': '!' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now enable list_metadef_resource_types and get_metadef_namespaces # permissions and disable get_metadef_namespace permission to make sure # you will get an empty list as a response self.set_policy_rules({ 'get_metadef_namespaces': '@', 'get_metadef_namespace': '!', 'list_metadef_resource_types': '@' }) resp = self.api_get(path) md_resource = resp.json self.assertEqual(0, len(md_resource['namespaces'])) # Verify that response does not include associated resource types for namespace_obj in md_resource['namespaces']: self.assertNotIn('resource_type_associations', namespace_obj) def test_namespace_create_basic(self): self.start_server() # First make sure create namespace works with default policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) # Now disable add_metadef_namespace permissions and make sure any other # attempts fail self.set_policy_rules({ 'add_metadef_namespace': '!', 'get_metadef_namespace': '@' }) resp = self.api_post(path, json=NAME_SPACE2) self.assertEqual(403, resp.status_code) def test_namespace_create_with_resource_type_associations(self): self.start_server() # First make sure you can create namespace and resource type # associations with default policy path = '/v2/metadefs/namespaces' data = { "resource_type_associations": [{ "name": "MyResourceType", "prefix": "prefix_", "properties_target": "temp" }], } data.update(NAME_SPACE1) md_resource = self._create_metadef_resource(path=path, data=data) self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual( 'MyResourceType', md_resource['resource_type_associations'][0]['name']) # Now disable add_metadef_resource_type_association permissions and # make sure that even though you have permission to create the # namespace the request will fail self.set_policy_rules({ 'add_metadef_resource_type_association': '!', 'get_metadef_namespace': '@' }) data.update(NAME_SPACE2) resp = self.api_post(path, json=data) self.assertEqual(403, resp.status_code) def test_namespace_create_with_objects(self): self.start_server() # First make sure you can create namespace and objects # with default policy path = '/v2/metadefs/namespaces' data = { "objects": [{ "name": "MyObject", "description": "My object for My namespace", "properties": { "test_property": { "title": "test_property", "description": "Test property for My object", "type": "string" }, } }], } data.update(NAME_SPACE1) md_resource = self._create_metadef_resource(path=path, data=data) self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual( 'MyObject', md_resource['objects'][0]['name']) # Now disable add_metadef_object permissions and # make sure that even though you have permission to create the # namespace the request will fail self.set_policy_rules({ 'add_metadef_object': '!', 'get_metadef_namespace': '@' }) data.update(NAME_SPACE2) resp = self.api_post(path, json=data) self.assertEqual(403, resp.status_code) def test_namespace_create_with_tags(self): self.start_server() # First make sure you can create namespace and tags # with default policy path = '/v2/metadefs/namespaces' data = { "tags":
[{ "name": "MyTag", }], } data.update(NAME_SPACE1) md_resource = self._create_metadef_resource(path=path, data=data) self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual( 'MyTag', md_resource['tags'][0]['name']) # Now disable add_metadef_object permissions and # make sure that even you have permission to create namespace the # request will fail data.update(NAME_SPACE2) self.set_policy_rules({ 'add_metadef_tag': '!', 'get_metadef_namespace': '@' }) resp = self.api_post(path, json=data) self.assertEqual(403, resp.status_code) def test_namespace_create_with_properties(self): self.start_server() # First make sure you can create namespace and properties # with default policy path = '/v2/metadefs/namespaces' data = { "properties": { "TestProperty": { "title": "MyTestProperty", "description": "Test Property for My namespace", "type": "string" }, } } data.update(NAME_SPACE1) md_resource = self._create_metadef_resource(path=path, data=data) self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual( 'MyTestProperty', md_resource['properties']['TestProperty']['title']) # Now disable add_metadef_property permissions and # make sure that even you have permission to create namespace the # request will fail data.update(NAME_SPACE2) self.set_policy_rules({ 'add_metadef_property': '!', 'get_metadef_namespace': '@' }) resp = self.api_post(path, json=data) self.assertEqual(403, resp.status_code) def test_namespace_get_basic(self): self.start_server() # First make sure create namespace works with default policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=GLOBAL_NAMESPACE_DATA) self.assertEqual('MyNamespace', md_resource['namespace']) # Now make sure get_metadef_namespace will return all associated # resources in the response as every policy is open. 
path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) md_resource = resp.json self.assertEqual('MyNamespace', md_resource['namespace']) self.assertIn('objects', md_resource) self.assertIn('resource_type_associations', md_resource) self.assertIn('tags', md_resource) self.assertIn('properties', md_resource) # Now disable get_metadef_namespace policy to ensure that you are # forbidden to fulfill the request and get 404 not found self.set_policy_rules({'get_metadef_namespace': '!'}) path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) self.assertEqual(404, resp.status_code) # Now try to get the same namespace by different user self.set_policy_rules({'get_metadef_namespace': '@'}) self._verify_forbidden_converted_to_not_found(path, 'GET') # Now disable get_metadef_objects policy to ensure that you will # get forbidden response self.set_policy_rules({ 'get_metadef_objects': '!', 'get_metadef_namespace': '@', 'list_metadef_resource_types': '@', 'get_metadef_properties': '@', 'get_metadef_tags': '@' }) path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable list_metadef_resource_types policy to ensure that you # will get forbidden response self.set_policy_rules({ 'get_metadef_objects': '@', 'get_metadef_namespace': '@', 'list_metadef_resource_types': '!', 'get_metadef_properties': '@', 'get_metadef_tags': '@' }) path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable get_metadef_properties policy to ensure that you will # ger forbidden response self.set_policy_rules({ 'get_metadef_objects': '@', 'get_metadef_namespace': '@', 'list_metadef_resource_types': '@', 'get_metadef_properties': '!', 'get_metadef_tags': '@' }) path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable get_metadef_tags policy to ensure that you will # get forbidden response self.set_policy_rules({ 'get_metadef_objects': '@', 'get_metadef_namespace': '@', 'list_metadef_resource_types': '@', 'get_metadef_properties': '@', 'get_metadef_tags': '!' 
}) path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) self.assertEqual(403, resp.status_code) def test_namespace_update_basic(self): self.start_server() # First make sure create namespace works with default policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual('private', md_resource['visibility']) # Now ensure you are able to update the namespace path = '/v2/metadefs/namespaces/%s' % md_resource['namespace'] data = { 'visibility': 'public', 'namespace': md_resource['namespace'], } resp = self.api_put(path, json=data) md_resource = resp.json self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual('public', md_resource['visibility']) # Now disable modify_metadef_namespace permissions and make sure # any other attempts results in 403 forbidden self.set_policy_rules({ 'modify_metadef_namespace': '!', 'get_metadef_namespace': '@', }) resp = self.api_put(path, json=data) self.assertEqual(403, resp.status_code) # Now enable modify_metadef_namespace and get_metadef_namespace # permissions and make sure modifying non existing results in # 404 NotFound self.set_policy_rules({ 'modify_metadef_namespace': '@', 'get_metadef_namespace': '@', }) path = '/v2/metadefs/namespaces/non-existing' resp = self.api_put(path, json=data) self.assertEqual(404, resp.status_code) # Note for reviewers, this causes our "check get if modify fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.set_policy_rules({ 'modify_metadef_namespace': '!', 'get_metadef_namespace': '!', }) path = '/v2/metadefs/namespaces/%s' % md_resource['namespace'] resp = self.api_put(path, json=data) self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'modify_metadef_namespace': '@', 'get_metadef_namespace': '@', }) # Reset visibility to private # Now ensure you are able to update the namespace path = '/v2/metadefs/namespaces/%s' % md_resource['namespace'] data = { 'visibility': 'private', 'namespace': md_resource['namespace'], } resp = self.api_put(path, json=data) md_resource = resp.json self.assertEqual('MyNamespace', md_resource['namespace']) self.assertEqual('private', md_resource['visibility']) # Now try to update the same namespace by different user self._verify_forbidden_converted_to_not_found(path, 'PUT', json=data) def test_namespace_delete_basic(self): def _create_private_namespace(fn_call, data): path = '/v2/metadefs/namespaces' return fn_call(path=path, data=data) self.start_server() # First make sure create namespace works with default policy md_resource = _create_private_namespace( self._create_metadef_resource, NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) # Now ensure you are able to delete the namespace path = '/v2/metadefs/namespaces/%s' % md_resource['namespace'] resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that namespace is deleted path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) self.assertEqual(404, resp.status_code) # Now create another namespace to check deletion is not allowed md_resource = _create_private_namespace( self._create_metadef_resource, NAME_SPACE2) self.assertEqual('MySecondNamespace', md_resource['namespace']) # Now disable delete_metadef_namespace 
permissions and make sure # any other attempts fail path = '/v2/metadefs/namespaces/%s' % md_resource['namespace'] self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now enable both permissions and make sure deleting a # non-existing namespace returns 404 NotFound self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@' }) path = '/v2/metadefs/namespaces/non-existing' resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Note for reviewers: this causes our "check get if delete fails" # logic to return 404 as we expect, but it is unrelated to the latest # rev, which checks the namespace get operation first. self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '!', }) path = '/v2/metadefs/namespaces/%s' % md_resource['namespace'] resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Ensure accessing a non-visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@', }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') def test_namespace_delete_objects_basic(self): self.start_server() # First make sure create namespace and object works with default # policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path, data=GLOBAL_NAMESPACE_DATA) self.assertEqual('MyNamespace', md_resource['namespace']) self.assertIn('objects', md_resource) # Now ensure you are able to delete the object(s) from the namespace path = '/v2/metadefs/namespaces/%s/objects' % md_resource['namespace'] resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that the object from namespace is deleted but namespace is # available path = "/v2/metadefs/namespaces/%s" % md_resource['namespace'] resp = self.api_get(path) md_resource = resp.json self.assertNotIn('objects', md_resource) self.assertEqual('MyNamespace', md_resource['namespace']) # Now add another object to the namespace path = '/v2/metadefs/namespaces/%s/objects' % md_resource['namespace'] data = { "name": "MyObject", "description": "My object for My namespace", "properties": { "test_property": { "title": "test_property", "description": "Test property for My object", "type": "string" }, } } md_object = self._create_metadef_resource(path, data=data) self.assertEqual('MyObject', md_object['name']) # Now disable delete_metadef_namespace permissions and make sure # any other attempts to delete objects fail path = '/v2/metadefs/namespaces/%s/objects' % md_resource['namespace'] self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now enable both permissions and make sure # deleting objects for a non-existing namespace returns 404 Not Found path = '/v2/metadefs/namespaces/non-existing/objects' self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Note for reviewers: this causes our "check get if delete fails" # logic to return 404 as we expect, but it is unrelated to the latest # rev, which checks the namespace get operation first.
self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '!', }) path = '/v2/metadefs/namespaces/%s/objects' % md_resource['namespace'] resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@', }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') def test_namespace_delete_properties_basic(self): self.start_server() # First make sure create namespace and properties works with default # policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path, data=GLOBAL_NAMESPACE_DATA) namespace = md_resource['namespace'] self.assertEqual('MyNamespace', namespace) self.assertIn('properties', md_resource) # Now ensure you are able to delete all properties from namespace path = '/v2/metadefs/namespaces/%s/properties' % namespace resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that properties from namespace are deleted but namespace is # available path = "/v2/metadefs/namespaces/%s" % namespace resp = self.api_get(path) md_resource = resp.json self.assertNotIn('properties', md_resource) self.assertEqual('MyNamespace', namespace) # Now add another property to the namespace path = '/v2/metadefs/namespaces/%s/properties' % namespace data = { "name": "MyProperty", "title": "test_property", "description": "Test property for My Namespace", "type": "string" } md_resource = self._create_metadef_resource(path, data=data) self.assertEqual('MyProperty', md_resource['name']) # Now disable delete_metadef_namespace permissions and make sure # any other attempts to delete properties fail path = '/v2/metadefs/namespaces/%s/properties' % namespace self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '@', }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now enable both permissions and make sure # deleting properties for non existing namespace returns 404 Not found path = '/v2/metadefs/namespaces/non-existing/properties' self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@', }) resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Note for reviewers, this causes our "check get if delete fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first.
self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '!', }) path = '/v2/metadefs/namespaces/%s/properties' % namespace resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@', }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') def test_namespace_delete_tags_basic(self): self.start_server() # First make sure create namespace and tags works with default # policy path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path, data=GLOBAL_NAMESPACE_DATA) namespace = md_resource['namespace'] self.assertEqual('MyNamespace', namespace) self.assertIn('tags', md_resource) # Now ensure you are able to delete all tags from namespace path = '/v2/metadefs/namespaces/%s/tags' % namespace resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that tags from namespace are deleted but namespace is # available path = "/v2/metadefs/namespaces/%s" % namespace resp = self.api_get(path) md_resource = resp.json self.assertNotIn('tags', md_resource) self.assertEqual('MyNamespace', namespace) # Now add another tag to the namespace tag_name = "MyTag" path = '/v2/metadefs/namespaces/%s/tags/%s' % (namespace, tag_name) md_resource = self._create_metadef_resource(path) self.assertEqual('MyTag', md_resource['name']) # Now disable delete_metadef_namespace permissions and make sure # any other attempts to delete tags fail path = '/v2/metadefs/namespaces/%s/tags' % namespace self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now enable delete_metadef_namespace permissions and disable # delete_metadef_tags to make sure # any other attempts to delete tags fail path = '/v2/metadefs/namespaces/%s/tags' % namespace self.set_policy_rules({ 'delete_metadef_namespace': '@', 'delete_metadef_tags': '!', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now enable all permissions and make sure deleting tags for # non existing namespace will return 404 Not found path = '/v2/metadefs/namespaces/non-existing/tags' self.set_policy_rules({ 'delete_metadef_namespace': '@', 'delete_metadef_tags': '@', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Note for reviewers, this causes our "check get if delete fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.set_policy_rules({ 'delete_metadef_namespace': '!', 'get_metadef_namespace': '!', 'delete_metadef_tags': '!' }) path = '/v2/metadefs/namespaces/%s/tags' % namespace resp = self.api_delete(path) self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_namespace': '@', 'get_metadef_namespace': '@', 'delete_metadef_tags': '@' }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_namespaces.py0000664000175000017500000004361000000000000025363 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http from oslo_serialization import jsonutils import requests from glance.tests.functional.v2 import metadef_base class TestNamespaces(metadef_base.MetadefFunctionalTestBase): def setUp(self): super(TestNamespaces, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def test_namespace_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description" } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) namespace_loc_header = response.headers['Location'] # Returned namespace should match the created namespace with default # values of visibility=private, protected=False and owner=Context # Tenant namespace = jsonutils.loads(response.text) checked_keys = set([ 'namespace', 'display_name', 'description', 'visibility', 'self', 'schema', 'protected', 'owner', 'created_at', 'updated_at' ]) self.assertEqual(set(namespace.keys()), checked_keys) expected_namespace = { "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "private", "protected": False, "owner": self.tenant1, "self": "/v2/metadefs/namespaces/%s" % namespace_name, "schema": "/v2/schemas/metadefs/namespace" } for key, value in expected_namespace.items(): self.assertEqual(namespace[key], value, key) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code) # Get the namespace using the returned Location header response = requests.get(namespace_loc_header, headers=self._headers()) self.assertEqual(http.OK, response.status_code) namespace = jsonutils.loads(response.text) self.assertEqual(namespace_name, namespace['namespace']) self.assertNotIn('object', namespace) self.assertEqual(self.tenant1, namespace['owner']) self.assertEqual('private', namespace['visibility']) self.assertFalse(namespace['protected']) # The namespace should be mutable path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) media_type = 'application/json' headers = self._headers({'content-type': media_type}) namespace_name = "MyNamespace-UPDATED" data = jsonutils.dumps( { "namespace": namespace_name, "display_name": "display_name-UPDATED", "description": "description-UPDATED", "visibility": "private", # Not changed "protected": True, "owner": self.tenant2 } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned namespace 
should reflect the changes namespace = jsonutils.loads(response.text) self.assertEqual('MyNamespace-UPDATED', namespace['namespace']) self.assertEqual('display_name-UPDATED', namespace['display_name']) self.assertEqual('description-UPDATED', namespace['description']) self.assertEqual('private', namespace['visibility']) self.assertTrue(namespace['protected']) self.assertEqual(self.tenant2, namespace['owner']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) namespace = jsonutils.loads(response.text) self.assertEqual('MyNamespace-UPDATED', namespace['namespace']) self.assertEqual('display_name-UPDATED', namespace['display_name']) self.assertEqual('description-UPDATED', namespace['description']) self.assertEqual('private', namespace['visibility']) self.assertTrue(namespace['protected']) self.assertEqual(self.tenant2, namespace['owner']) # Deletion should not work on protected namespaces path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.FORBIDDEN, response.status_code) # Unprotect namespace for deletion path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) media_type = 'application/json' headers = self._headers({'content-type': media_type}) doc = { "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": self.tenant2 } data = jsonutils.dumps(doc) response = requests.put(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Deletion should work on the now unprotected namespace path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) def test_metadef_dont_accept_illegal_bodies(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/bodytest') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'bodytest' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description" } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Test all the urls that supply data data_urls = [ '/v2/schemas/metadefs/namespace', '/v2/schemas/metadefs/namespaces', '/v2/schemas/metadefs/resource_type', '/v2/schemas/metadefs/resource_types', '/v2/schemas/metadefs/property', '/v2/schemas/metadefs/properties', '/v2/schemas/metadefs/object', '/v2/schemas/metadefs/objects', '/v2/schemas/metadefs/tag', '/v2/schemas/metadefs/tags', '/v2/metadefs/resource_types', ] for value in data_urls: path = self._url(value) data = jsonutils.dumps(["body"]) response = requests.get(path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) # Put the namespace into the url test_urls = [ ('/v2/metadefs/namespaces/%s/resource_types', 'get'),
('/v2/metadefs/namespaces/%s/resource_types/type', 'delete'), ('/v2/metadefs/namespaces/%s', 'get'), ('/v2/metadefs/namespaces/%s', 'delete'), ('/v2/metadefs/namespaces/%s/objects/name', 'get'), ('/v2/metadefs/namespaces/%s/objects/name', 'delete'), ('/v2/metadefs/namespaces/%s/properties', 'get'), ('/v2/metadefs/namespaces/%s/tags/test', 'get'), ('/v2/metadefs/namespaces/%s/tags/test', 'post'), ('/v2/metadefs/namespaces/%s/tags/test', 'delete'), ] for link, method in test_urls: path = self._url(link % namespace_name) data = jsonutils.dumps(["body"]) response = getattr(requests, method)( path, headers=self._headers(), data=data) self.assertEqual(http.BAD_REQUEST, response.status_code) def _update_namespace(self, path, headers, data): # The namespace should be mutable response = requests.put(path, headers=headers, json=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned namespace should reflect the changes namespace = response.json() expected_namespace = { "namespace": data['namespace'], "display_name": data['display_name'], "description": data['description'], "visibility": data['visibility'], "protected": True, "owner": data['owner'], "self": "/v2/metadefs/namespaces/%s" % data['namespace'], "schema": "/v2/schemas/metadefs/namespace" } namespace.pop('created_at') namespace.pop('updated_at') self.assertEqual(namespace, expected_namespace) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s' % namespace['namespace']) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) namespace = response.json() namespace.pop('created_at') namespace.pop('updated_at') self.assertEqual(namespace, expected_namespace) return namespace def test_role_based_namespace_lifecycle(self): # Create public and private namespaces for tenant1 and tenant2 path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) tenant_namespaces = dict() for tenant in [self.tenant1, self.tenant2]: headers['X-Tenant-Id'] = tenant for visibility in ['public', 'private']: namespace_data = { "namespace": "%s_%s_namespace" % (tenant, visibility), "display_name": "My User Friendly Namespace", "description": "My description", "visibility": visibility, "owner": tenant } namespace = self.create_namespace(path, headers, namespace_data) self.assertNamespacesEqual(namespace, namespace_data) tenant_namespaces.setdefault(tenant, list()) tenant_namespaces[tenant].append(namespace) # Check Tenant 1 and Tenant 2 will be able to see total 3 namespaces # (two of own and 1 public of other tenant) def _get_expected_namespaces(tenant): expected_namespaces = [] for x in tenant_namespaces[tenant]: expected_namespaces.append(x['namespace']) if tenant == self.tenant1: expected_namespaces.append( tenant_namespaces[self.tenant2][0]['namespace']) else: expected_namespaces.append( tenant_namespaces[self.tenant1][0]['namespace']) return expected_namespaces # Check Tenant 1 and Tenant 2 will be able to see total 3 namespaces # (two of own and 1 public of other tenant) for tenant in [self.tenant1, self.tenant2]: path = self._url('/v2/metadefs/namespaces') headers = self._headers({'X-Tenant-Id': tenant, 'X-Roles': 'reader,member'}) response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) namespaces = response.json()['namespaces'] expected_namespaces = _get_expected_namespaces(tenant) self.assertEqual(sorted(x['namespace'] for x in namespaces), sorted(expected_namespaces)) def 
_check_namespace_access(namespaces, tenant): headers = self._headers({'X-Tenant-Id': tenant, 'X-Roles': 'reader,member'}) for namespace in namespaces: path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) headers = headers response = requests.get(path, headers=headers) if namespace['visibility'] == 'public': self.assertEqual(http.OK, response.status_code) else: self.assertEqual(http.NOT_FOUND, response.status_code) # Check Tenant 1 can access public namespace and cannot access private # namespace of Tenant 2 _check_namespace_access(tenant_namespaces[self.tenant2], self.tenant1) # Check Tenant 2 can access public namespace and cannot access private # namespace of Tenant 1 _check_namespace_access(tenant_namespaces[self.tenant1], self.tenant2) total_ns = tenant_namespaces[self.tenant1] \ + tenant_namespaces[self.tenant2] for namespace in total_ns: data = { "namespace": namespace['namespace'], "display_name": "display_name-UPDATED", "description": "description-UPDATED", "visibility": namespace['visibility'], # Not changed "protected": True, # changed "owner": namespace["owner"] # Not changed } path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) headers = self._headers({ 'X-Tenant-Id': namespace['owner'], }) # Update namespace should fail with non admin role headers['X-Roles'] = "reader,member" response = requests.put(path, headers=headers, json=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Should work with admin role headers['X-Roles'] = "admin" namespace = self._update_namespace(path, headers, data) # Deletion should fail as namespaces are protected now path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) headers['X-Roles'] = "admin" response = requests.delete(path, headers=headers) self.assertEqual(http.FORBIDDEN, response.status_code) # Deletion should not be allowed for non admin roles path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) response = requests.delete( path, headers=self._headers({ 'X-Roles': 'reader,member', 'X-Tenant-Id': namespace['owner'] })) self.assertEqual(http.FORBIDDEN, response.status_code) # Unprotect the namespaces before deletion headers = self._headers() for namespace in total_ns: path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) headers = headers data = { "namespace": namespace['namespace'], "protected": False, } response = requests.put(path, headers=headers, json=data) self.assertEqual(http.OK, response.status_code) # Get updated namespace set again path = self._url('/v2/metadefs/namespaces') response = requests.get(path, headers=headers) self.assertEqual(http.OK, response.status_code) self.assertFalse(namespace['protected']) namespaces = response.json()['namespaces'] # Verify that deletion is not allowed for unprotected namespaces with # non admin role for namespace in namespaces: path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) response = requests.delete( path, headers=self._headers({ 'X-Roles': 'reader,member', 'X-Tenant-Id': namespace['owner'] })) self.assertEqual(http.FORBIDDEN, response.status_code) # Delete namespaces of all tenants for namespace in total_ns: path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) response = requests.delete(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) # Deleted namespace should not be returned path = self._url( '/v2/metadefs/namespaces/%s' % namespace['namespace']) response = requests.get(path, headers=headers) 
self.assertEqual(http.NOT_FOUND, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_object_api_policy.py0000664000175000017500000003012400000000000026716 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import oslo_policy.policy from glance.api import policy from glance.tests import functional OBJECT1 = { "name": "MyObject", "description": "My object for My namespace", "properties": { "test_property": { "title": "test_property", "description": "Test property for My object", "type": "string" }, } } OBJECT2 = { "name": "MySecondObject", "description": "My object for My namespace", "properties": { "test_property_2": { "title": "test_property_2", "description": "Test property for My second object", "type": "string" }, } } NAME_SPACE1 = { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description" } class TestMetadefObjectsPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestMetadefObjectsPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def load_data(self, create_objects=False): path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) if create_objects: namespace = md_resource['namespace'] path = '/v2/metadefs/namespaces/%s/objects' % namespace for obj in [OBJECT1, OBJECT2]: md_resource = self._create_metadef_resource(path=path, data=obj) self.assertEqual(obj['name'], md_resource['name']) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestMetadefObjectsPolicy, self).start_server() def _verify_forbidden_converted_to_not_found(self, path, method, json=None): # Note for other reviewers, these tests runs by default using # admin role, to test this scenario we need private namespace # of current project to be accessed by other projects non-admin # user. 
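# (Throughout these policy tests, set_policy_rules uses oslo.policy # shorthand: '@' is a rule that always passes and '!' is a rule that # never passes. Returning 404 rather than 403 for a private namespace # is deliberate, since a 403 would reveal that the namespace exists.)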
headers = self._headers({ 'X-Tenant-Id': 'fake-tenant-id', 'X-Roles': 'member', }) resp = self.api_request(method, path, headers=headers, json=json) self.assertEqual(404, resp.status_code) def test_object_create_basic(self): self.start_server() # Create namespace self.load_data() # First make sure create object works with default policy path = '/v2/metadefs/namespaces/%s/objects' % NAME_SPACE1['namespace'] md_resource = self._create_metadef_resource(path=path, data=OBJECT1) self.assertEqual('MyObject', md_resource['name']) # Now disable add_metadef_object permissions and make sure any other # attempts fail self.set_policy_rules({ 'add_metadef_object': '!', 'get_metadef_namespace': '@' }) resp = self.api_post(path, json=OBJECT2) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'add_metadef_object': '!', 'get_metadef_namespace': '!' }) resp = self.api_post(path, json=OBJECT2) # Note for reviewers, this causes our "check get if add fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'add_metadef_object': '@', 'get_metadef_namespace': '@' }) self._verify_forbidden_converted_to_not_found(path, 'POST', json=OBJECT2) def test_object_list_basic(self): self.start_server() # Create namespace and objects self.load_data(create_objects=True) # First make sure list object works with default policy path = '/v2/metadefs/namespaces/%s/objects' % NAME_SPACE1['namespace'] resp = self.api_get(path) md_resource = resp.json self.assertEqual(2, len(md_resource['objects'])) # Now disable get_metadef_objects permissions and make sure any other # attempts fail self.set_policy_rules({ 'get_metadef_objects': '!', 'get_metadef_namespace': '@' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_metadef_objects': '!', 'get_metadef_namespace': '!' }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if list fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Now enable get_metadef_objects and disable # get_metadef_object permission to make sure that you will get # empty list as a response self.set_policy_rules({ 'get_metadef_objects': '@', 'get_metadef_object': '!', 'get_metadef_namespace': '@' }) resp = self.api_get(path) md_resource = resp.json self.assertEqual(0, len(md_resource['objects'])) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_objects': '@', 'get_metadef_object': '@', 'get_metadef_namespace': '@' }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_object_get_basic(self): self.start_server() # Create namespace and objects self.load_data(create_objects=True) # First make sure get object works with default policy path = '/v2/metadefs/namespaces/%s/objects/%s' % ( NAME_SPACE1['namespace'], OBJECT1['name']) resp = self.api_get(path) md_resource = resp.json self.assertEqual(OBJECT1['name'], md_resource['name']) # Now disable get_metadef_object permissions and make sure any other # attempts fail self.set_policy_rules({ 'get_metadef_object': '!', 'get_metadef_namespace': '@' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_metadef_object': '!', 'get_metadef_namespace': '!' }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if get fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_object': '@', 'get_metadef_namespace': '@' }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_object_update_basic(self): self.start_server() # Create namespace and objects self.load_data(create_objects=True) # First make sure list object works with default policy path = '/v2/metadefs/namespaces/%s/objects/%s' % ( NAME_SPACE1['namespace'], OBJECT1['name']) data = { "name": OBJECT1['name'], "description": "My updated description" } resp = self.api_put(path, json=data) md_resource = resp.json self.assertEqual(data['description'], md_resource['description']) # Now disable modify_metadef_object permissions and make sure any other # attempts fail data = { "name": OBJECT2['name'], "description": "My updated description" } path = '/v2/metadefs/namespaces/%s/objects/%s' % ( NAME_SPACE1['namespace'], OBJECT2['name']) self.set_policy_rules({ 'modify_metadef_object': '!', 'get_metadef_namespace': '@' }) resp = self.api_put(path, json=data) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'modify_metadef_object': '!', 'get_metadef_namespace': '!' }) resp = self.api_put(path, json=data) # Note for reviewers, this causes our "check get if modify fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'modify_metadef_object': '@', 'get_metadef_namespace': '@' }) self._verify_forbidden_converted_to_not_found(path, 'PUT', json=data) def test_object_delete_basic(self): self.start_server() # Create namespace and objects self.load_data(create_objects=True) # Now ensure you are able to delete the object path = '/v2/metadefs/namespaces/%s/objects/%s' % ( NAME_SPACE1['namespace'], OBJECT1['name']) resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that object is deleted path = "/v2/metadefs/namespaces/%s/objects/%s" % ( NAME_SPACE1['namespace'], OBJECT1['name']) resp = self.api_get(path) self.assertEqual(404, resp.status_code) # Now disable delete_metadef_object permissions and make sure # any other attempts fail path = '/v2/metadefs/namespaces/%s/objects/%s' % ( NAME_SPACE1['namespace'], OBJECT2['name']) self.set_policy_rules({ 'delete_metadef_object': '!', 'get_metadef_namespace': '@' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'delete_metadef_object': '!', 'get_metadef_namespace': '!' }) resp = self.api_delete(path) # Note for reviewers, this causes our "check get if delete fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_object': '@', 'get_metadef_namespace': '@' }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_objects.py0000664000175000017500000004475500000000000024710 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
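# Functional tests for the metadef objects REST API, covering the full # create/read/update/delete lifecycle and role-based access to objects # in public and private namespaces.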
import http.client as http from oslo_serialization import jsonutils import requests from glance.tests.functional.v2 import metadef_base class TestMetadefObjects(metadef_base.MetadefFunctionalTestBase): def setUp(self): super(TestMetadefObjects, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def test_metadata_objects_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner" } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Metadata objects should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace/objects/object1') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a object path = self._url('/v2/metadefs/namespaces/MyNamespace/objects') headers = self._headers({'content-type': 'application/json'}) metadata_object_name = "object1" data = jsonutils.dumps( { "name": metadata_object_name, "description": "object1 description.", "required": [ "property1" ], "properties": { "property1": { "type": "integer", "title": "property1", "description": "property1 description", "operators": [""], "default": 100, "minimum": 100, "maximum": 30000369 }, "property2": { "type": "string", "title": "property2", "description": "property2 description ", "default": "value2", "minLength": 2, "maxLength": 50 } } } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code) # Get the metadata object created above path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) metadata_object = jsonutils.loads(response.text) self.assertEqual("object1", metadata_object['name']) # Returned object should match the created object metadata_object = jsonutils.loads(response.text) checked_keys = set([ 'name', 'description', 'properties', 'required', 'self', 'schema', 'created_at', 'updated_at' ]) self.assertEqual(set(metadata_object.keys()), checked_keys) expected_metadata_object = { "name": metadata_object_name, "description": "object1 description.", "required": [ "property1" ], "properties": { 'property1': { 'type': 'integer', "title": "property1", 'description': 'property1 description', 'operators': [''], 'default': 100, 'minimum': 100, 'maximum': 30000369 }, "property2": { "type": "string", "title": "property2", "description": "property2 description ", "default": "value2", "minLength": 2, "maxLength": 50 } }, "self": "/v2/metadefs/namespaces/%(" "namespace)s/objects/%(object)s" % {'namespace': namespace_name, 'object': metadata_object_name}, "schema": "v2/schemas/metadefs/object" } # Simple key values checked_values = set([ 'name', 'description', ]) for key, value in 
expected_metadata_object.items(): if key in checked_values: self.assertEqual(metadata_object[key], value, key) # Complex key values - properties for key, value in ( expected_metadata_object["properties"]['property2'].items()): self.assertEqual( metadata_object["properties"]["property2"][key], value, key ) # The metadata_object should be mutable path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) media_type = 'application/json' headers = self._headers({'content-type': media_type}) metadata_object_name = "object1-UPDATED" data = jsonutils.dumps( { "name": metadata_object_name, "description": "desc-UPDATED", "required": [ "property2" ], "properties": { 'property1': { 'type': 'integer', "title": "property1", 'description': 'p1 desc-UPDATED', 'default': 500, 'minimum': 500, 'maximum': 1369 }, "property2": { "type": "string", "title": "property2", "description": "p2 desc-UPDATED", 'operators': [''], "default": "value2-UPDATED", "minLength": 5, "maxLength": 150 } } } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned metadata_object should reflect the changes metadata_object = jsonutils.loads(response.text) self.assertEqual('object1-UPDATED', metadata_object['name']) self.assertEqual('desc-UPDATED', metadata_object['description']) self.assertEqual('property2', metadata_object['required'][0]) updated_property1 = metadata_object['properties']['property1'] updated_property2 = metadata_object['properties']['property2'] self.assertEqual('integer', updated_property1['type']) self.assertEqual('p1 desc-UPDATED', updated_property1['description']) self.assertEqual('500', updated_property1['default']) self.assertEqual(500, updated_property1['minimum']) self.assertEqual(1369, updated_property1['maximum']) self.assertEqual([''], updated_property2['operators']) self.assertEqual('string', updated_property2['type']) self.assertEqual('p2 desc-UPDATED', updated_property2['description']) self.assertEqual('value2-UPDATED', updated_property2['default']) self.assertEqual(5, updated_property2['minLength']) self.assertEqual(150, updated_property2['maxLength']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) metadata_object = jsonutils.loads(response.text) self.assertEqual('object1-UPDATED', metadata_object['name']) self.assertEqual('desc-UPDATED', metadata_object['description']) self.assertEqual('property2', metadata_object['required'][0]) updated_property1 = metadata_object['properties']['property1'] updated_property2 = metadata_object['properties']['property2'] self.assertEqual('integer', updated_property1['type']) self.assertEqual('p1 desc-UPDATED', updated_property1['description']) self.assertEqual('500', updated_property1['default']) self.assertEqual(500, updated_property1['minimum']) self.assertEqual(1369, updated_property1['maximum']) self.assertEqual([''], updated_property2['operators']) self.assertEqual('string', updated_property2['type']) self.assertEqual('p2 desc-UPDATED', updated_property2['description']) self.assertEqual('value2-UPDATED', updated_property2['default']) self.assertEqual(5, updated_property2['minLength']) self.assertEqual(150, updated_property2['maxLength']) # Deletion of metadata_object object1 path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.delete(path,
headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # metadata_object object1 should not exist path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) def _create_object(self, namespaces): objects = [] for namespace in namespaces: headers = self._headers({'X-Tenant-Id': namespace['owner']}) data = { "name": "object_of_%s" % (namespace['namespace']), "description": "object description.", "required": [ "property1" ], "properties": { "property1": { "type": "integer", "title": "property1", "description": "property1 description", }, } } path = self._url('/v2/metadefs/namespaces/%s/objects' % namespace['namespace']) response = requests.post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) obj_metadata = response.json() metadef_objects = dict() metadef_objects[namespace['namespace']] = obj_metadata['name'] objects.append(metadef_objects) return objects def _update_object(self, path, headers, data, namespace): response = requests.put(path, headers=headers, json=data) self.assertEqual(http.OK, response.status_code, response.text) expected_object = { 'description': data['description'], 'name': data['name'], 'properties': data['properties'], 'required': data['required'], 'schema': '/v2/schemas/metadefs/object', 'self': '/v2/metadefs/namespaces/%s/objects/%s' % (namespace, data['name']) } # Returned metadata_object should reflect the changes metadata_object = response.json() metadata_object.pop('created_at') metadata_object.pop('updated_at') self.assertEqual(metadata_object, expected_object) # Updates should persist across requests response = requests.get(path, headers=self._headers()) metadata_object = response.json() metadata_object.pop('created_at') metadata_object.pop('updated_at') self.assertEqual(metadata_object, expected_object) def test_role_base_metadata_objects_lifecycle(self): # Create public and private namespaces for tenant1 and tenant2 path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) tenant1_namespaces = [] tenant2_namespaces = [] for tenant in [self.tenant1, self.tenant2]: headers['X-Tenant-Id'] = tenant for visibility in ['public', 'private']: namespace_data = { "namespace": "%s_%s_namespace" % (tenant, visibility), "display_name": "My User Friendly Namespace", "description": "My description", "visibility": visibility, "owner": tenant } namespace = self.create_namespace(path, headers, namespace_data) self.assertNamespacesEqual(namespace, namespace_data) if tenant == self.tenant1: tenant1_namespaces.append(namespace) else: tenant2_namespaces.append(namespace) # Create a metadef object for each namespace created above tenant1_objects = self._create_object(tenant1_namespaces) tenant2_objects = self._create_object(tenant2_namespaces) def _check_object_access(objects, tenant): headers = self._headers({'content-type': 'application/json', 'X-Tenant-Id': tenant, 'X-Roles': 'reader,member'}) for obj in objects: for namespace, object_name in obj.items(): path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace, object_name)) headers = headers response = requests.get(path, headers=headers) if namespace.split('_')[1] == 'public': expected = http.OK else: expected = http.NOT_FOUND self.assertEqual(expected, response.status_code) path = self._url( '/v2/metadefs/namespaces/%s/objects' % namespace) response = 
requests.get(path, headers=headers) self.assertEqual(expected, response.status_code) if expected == http.OK: resp_objs = response.json()['objects'] self.assertEqual( sorted(obj.values()), sorted([x['name'] for x in resp_objs])) # Check Tenant 1 can access objects of all public namespaces # and cannot access object of private namespace of Tenant 2 _check_object_access(tenant2_objects, self.tenant1) # Check Tenant 2 can access objects of public namespace and # cannot access objects of private namespace of Tenant 1 _check_object_access(tenant1_objects, self.tenant2) # Update objects with admin and non admin role total_objects = tenant1_objects + tenant2_objects for obj in total_objects: for namespace, object_name in obj.items(): data = { "name": object_name, "description": "desc-UPDATED", "required": [ "property1" ], "properties": { 'property1': { 'type': 'integer', "title": "property1", 'description': 'p1 desc-UPDATED', } } } # Update object should fail with non admin role path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace, object_name)) headers['X-Roles'] = "reader,member" response = requests.put(path, headers=headers, json=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Should work with admin role headers = self._headers({ 'X-Tenant-Id': namespace.split('_')[0]}) self._update_object(path, headers, data, namespace) # Delete object should not be allowed for non admin roles for obj in total_objects: for namespace, object_name in obj.items(): path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace, object_name)) response = requests.delete( path, headers=self._headers({ 'X-Roles': 'reader,member', 'X-Tenant-Id': namespace.split('_')[0] })) self.assertEqual(http.FORBIDDEN, response.status_code) # Delete all metadef objects headers = self._headers() for obj in total_objects: for namespace, object_name in obj.items(): path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace, object_name)) response = requests.delete(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) # Deleted objects should no longer exist response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_properties.py0000664000175000017500000003771200000000000025446 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
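# Functional tests for the metadef namespace properties REST API, # covering the property lifecycle (CRUD) and role-based access to # properties in public and private namespaces.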
import http.client as http from oslo_serialization import jsonutils import requests from glance.tests.functional.v2 import metadef_base class TestNamespaceProperties(metadef_base.MetadefFunctionalTestBase): def setUp(self): super(TestNamespaceProperties, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def test_properties_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' resource_type_name = 'MyResourceType' resource_type_prefix = 'MyPrefix' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner", "resource_type_associations": [ { "name": resource_type_name, "prefix": resource_type_prefix } ] }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Property1 should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace/properties' '/property1') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a property path = self._url('/v2/metadefs/namespaces/MyNamespace/properties') headers = self._headers({'content-type': 'application/json'}) property_name = "property1" data = jsonutils.dumps( { "name": property_name, "type": "integer", "title": "property1", "description": "property1 description", "default": 100, "minimum": 100, "maximum": 30000369, "readonly": False, } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code) # Get the property created above path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) property_object = jsonutils.loads(response.text) self.assertEqual("integer", property_object['type']) self.assertEqual("property1", property_object['title']) self.assertEqual("property1 description", property_object[ 'description']) self.assertEqual('100', property_object['default']) self.assertEqual(100, property_object['minimum']) self.assertEqual(30000369, property_object['maximum']) # Get the property with specific resource type association path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % ( namespace_name, property_name, '='.join(['?resource_type', resource_type_name]))) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Get the property with prefix and specific resource type association property_name_with_prefix = ''.join([resource_type_prefix, property_name]) path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % ( namespace_name, property_name_with_prefix, '='.join([ '?resource_type', resource_type_name]))) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) property_object = jsonutils.loads(response.text) self.assertEqual("integer", 
property_object['type']) self.assertEqual("property1", property_object['title']) self.assertEqual("property1 description", property_object[ 'description']) self.assertEqual('100', property_object['default']) self.assertEqual(100, property_object['minimum']) self.assertEqual(30000369, property_object['maximum']) self.assertFalse(property_object['readonly']) # Returned property should match the created property property_object = jsonutils.loads(response.text) checked_keys = set([ 'name', 'type', 'title', 'description', 'default', 'minimum', 'maximum', 'readonly', ]) self.assertEqual(set(property_object.keys()), checked_keys) expected_metadata_property = { "type": "integer", "title": "property1", "description": "property1 description", "default": '100', "minimum": 100, "maximum": 30000369, "readonly": False, } for key, value in expected_metadata_property.items(): self.assertEqual(property_object[key], value, key) # The property should be mutable path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) media_type = 'application/json' headers = self._headers({'content-type': media_type}) property_name = "property1-UPDATED" data = jsonutils.dumps( { "name": property_name, "type": "string", "title": "string property", "description": "desc-UPDATED", "operators": [""], "default": "value-UPDATED", "minLength": 5, "maxLength": 10, "readonly": True, } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned property should reflect the changes property_object = jsonutils.loads(response.text) self.assertEqual('string', property_object['type']) self.assertEqual('desc-UPDATED', property_object['description']) self.assertEqual('value-UPDATED', property_object['default']) self.assertEqual([""], property_object['operators']) self.assertEqual(5, property_object['minLength']) self.assertEqual(10, property_object['maxLength']) self.assertTrue(property_object['readonly']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) property_object = jsonutils.loads(response.text) self.assertEqual('string', property_object['type']) self.assertEqual('desc-UPDATED', property_object['description']) self.assertEqual('value-UPDATED', property_object['default']) self.assertEqual([""], property_object['operators']) self.assertEqual(5, property_object['minLength']) self.assertEqual(10, property_object['maxLength']) # Deletion of property property1 path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # property1 should not exist path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) def _create_properties(self, namespaces): properties = [] for namespace in namespaces: headers = self._headers({'X-Tenant-Id': namespace['owner']}) data = { "name": "property_of_%s" % (namespace['namespace']), "type": "integer", "title": "property", "description": "property description", } path = self._url('/v2/metadefs/namespaces/%s/properties' % namespace['namespace']) response = requests.post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) prop_metadata = response.json() metadef_property = dict()
metadef_property[namespace['namespace']] = prop_metadata['name'] properties.append(metadef_property) return properties def _update_property(self, path, headers, data): # The property should be mutable response = requests.put(path, headers=headers, json=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned property should reflect the changes property_object = response.json() self.assertEqual('string', property_object['type']) self.assertEqual(data['description'], property_object['description']) # Updates should persist across requests response = requests.get(path, headers=self._headers()) property_object = response.json() self.assertEqual('string', property_object['type']) self.assertEqual(data['description'], property_object['description']) def test_role_base_metadata_properties_lifecycle(self): # Create public and private namespaces for tenant1 and tenant2 path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) tenant1_namespaces = [] tenant2_namespaces = [] for tenant in [self.tenant1, self.tenant2]: headers['X-Tenant-Id'] = tenant for visibility in ['public', 'private']: namespace_data = { "namespace": "%s_%s_namespace" % (tenant, visibility), "display_name": "My User Friendly Namespace", "description": "My description", "visibility": visibility, "owner": tenant } namespace = self.create_namespace(path, headers, namespace_data) self.assertNamespacesEqual(namespace, namespace_data) if tenant == self.tenant1: tenant1_namespaces.append(namespace) else: tenant2_namespaces.append(namespace) # Create a metadef property for each namespace created above tenant1_properties = self._create_properties(tenant1_namespaces) tenant2_properties = self._create_properties(tenant2_namespaces) def _check_properties_access(properties, tenant): headers = self._headers({'content-type': 'application/json', 'X-Tenant-Id': tenant, 'X-Roles': 'reader,member'}) for prop in properties: for namespace, property_name in prop.items(): path = \ self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace, property_name)) response = requests.get(path, headers=headers) if namespace.split('_')[1] == 'public': expected = http.OK else: expected = http.NOT_FOUND # Make sure we can see our and public properties, but not # the other tenant's self.assertEqual(expected, response.status_code) # Make sure the same holds for listing path = self._url( '/v2/metadefs/namespaces/%s/properties' % namespace) response = requests.get(path, headers=headers) self.assertEqual(expected, response.status_code) if expected == http.OK: resp_props = response.json()['properties'].values() self.assertEqual( sorted(prop.values()), sorted([x['name'] for x in resp_props])) # Check Tenant 1 can access properties of all public namespaces # and cannot access properties of private namespace of Tenant 2 _check_properties_access(tenant2_properties, self.tenant1) # Check Tenant 2 can access properties of public namespace and # cannot access properties of private namespace of Tenant 1 _check_properties_access(tenant1_properties, self.tenant2) # Update properties with admin and non admin role total_properties = tenant1_properties + tenant2_properties for prop in total_properties: for namespace, property_name in prop.items(): data = { "name": property_name, "type": "string", "title": "string property", "description": "desc-UPDATED", } path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace, property_name)) # Update property should fail with non admin role headers['X-Roles'] = "reader,member" response =
requests.put(path, headers=headers, json=data) self.assertEqual(http.FORBIDDEN, response.status_code) # Should work with admin role headers = self._headers({ 'X-Tenant-Id': namespace.split('_')[0]}) self._update_property(path, headers, data) # Delete property should not be allowed for non admin roles for prop in total_properties: for namespace, property_name in prop.items(): path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace, property_name)) response = requests.delete( path, headers=self._headers({ 'X-Roles': 'reader,member', 'X-Tenant-Id': namespace.split('_')[0] })) self.assertEqual(http.FORBIDDEN, response.status_code) # Delete all metadef properties headers = self._headers() for prop in total_properties: for namespace, property_name in prop.items(): path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace, property_name)) response = requests.delete(path, headers=headers) self.assertEqual(http.NO_CONTENT, response.status_code) # Deleted property should no longer exist response = requests.get(path, headers=headers) self.assertEqual(http.NOT_FOUND, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_property_api_policy.py0000664000175000017500000003046600000000000027345 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
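# Policy (RBAC) tests for the metadef property API. These run against # the in-process synchronous API server and toggle individual policy # rules to verify the expected 403/404 behavior.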
from unittest import mock import oslo_policy.policy from glance.api import policy from glance.tests import functional PROPERTY1 = { "name": "MyProperty", "title": "My Property", "description": "My Property for My Namespace", "type": "string" } PROPERTY2 = { "name": "MySecondProperty", "title": "My Second Property", "description": "My Second Property for My Namespace", "type": "string" } NAME_SPACE1 = { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description" } class TestMetadefPropertiesPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestMetadefPropertiesPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def load_data(self, create_properties=False): path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) if create_properties: namespace = md_resource['namespace'] path = '/v2/metadefs/namespaces/%s/properties' % namespace for prop in [PROPERTY1, PROPERTY2]: md_resource = self._create_metadef_resource(path=path, data=prop) self.assertEqual(prop['name'], md_resource['name']) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestMetadefPropertiesPolicy, self).start_server() def _verify_forbidden_converted_to_not_found(self, path, method, json=None): # Note for other reviewers, these tests runs by default using # admin role, to test this scenario we need private namespace # of current project to be accessed by other projects non-admin # user. headers = self._headers({ 'X-Tenant-Id': 'fake-tenant-id', 'X-Roles': 'member', }) resp = self.api_request(method, path, headers=headers, json=json) self.assertEqual(404, resp.status_code) def test_property_list_basic(self): self.start_server() # Create namespace and properties self.load_data(create_properties=True) # First make sure list property works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/properties' % namespace resp = self.api_get(path) md_resource = resp.json self.assertEqual(2, len(md_resource['properties'])) # Now disable get_metadef_properties permissions and make sure # any other attempts fail self.set_policy_rules({ 'get_metadef_properties': '!', 'get_metadef_namespace': '' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_metadef_properties': '!', 'get_metadef_namespace': '!' }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if list fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_properties': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_property_get_basic(self): self.start_server() # Create namespace and properties self.load_data(create_properties=True) # First make sure get property works with default policy path = '/v2/metadefs/namespaces/%s/properties/%s' % ( NAME_SPACE1['namespace'], PROPERTY1['name']) resp = self.api_get(path) md_resource = resp.json self.assertEqual(PROPERTY1['name'], md_resource['name']) # Now disable get_metadef_property permissions and make sure any other # attempts fail self.set_policy_rules({ 'get_metadef_property': '!', 'get_metadef_namespace': '', 'get_metadef_resource_type': '' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable get_metadef_resource_type permissions and make sure # any other attempts fail self.set_policy_rules({ 'get_metadef_property': '', 'get_metadef_namespace': '', 'get_metadef_resource_type': '!' }) url_path = "%s?resource_type='abcd'" % path resp = self.api_get(url_path) self.assertEqual(403, resp.status_code) # Now disable all permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_metadef_property': '!', 'get_metadef_namespace': '!', 'get_metadef_resource_type': '!' }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if get fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_property': '', 'get_metadef_namespace': '', 'get_metadef_resource_type': '' }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_property_create_basic(self): self.start_server() # Create namespace self.load_data() # First make sure create property works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/properties' % namespace md_resource = self._create_metadef_resource(path=path, data=PROPERTY1) self.assertEqual('MyProperty', md_resource['name']) # Now disable add_metadef_property permissions and make sure any other # attempts fail self.set_policy_rules({ 'add_metadef_property': '!', 'get_metadef_namespace': '' }) resp = self.api_post(path, json=PROPERTY2) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'add_metadef_property': '!', 'get_metadef_namespace': '!' }) resp = self.api_post(path, json=PROPERTY2) # Note for reviewers, this causes our "check get if get fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'add_metadef_property': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'POST', json=PROPERTY2) def test_property_update_basic(self): self.start_server() # Create namespace and properties self.load_data(create_properties=True) # First make sure update property works with default policy path = '/v2/metadefs/namespaces/%s/properties/%s' % ( NAME_SPACE1['namespace'], PROPERTY1['name']) data = { "name": PROPERTY1['name'], "title": PROPERTY1['title'], "type": PROPERTY1['type'], "description": "My updated description" } resp = self.api_put(path, json=data) md_resource = resp.json self.assertEqual(data['description'], md_resource['description']) # Now disable modify_metadef_property permissions and make sure # any other attempts fail data = { "name": PROPERTY2['name'], "title": PROPERTY2['title'], "type": PROPERTY2['type'], "description": "My updated description" } path = '/v2/metadefs/namespaces/%s/properties/%s' % ( NAME_SPACE1['namespace'], PROPERTY2['name']) self.set_policy_rules({ 'modify_metadef_property': '!', 'get_metadef_namespace': '' }) resp = self.api_put(path, json=data) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'modify_metadef_property': '!', 'get_metadef_namespace': '!' }) resp = self.api_put(path, json=data) # Note for reviewers, this causes our "check get if get fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'modify_metadef_property': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'PUT', json=data) def test_property_delete_basic(self): self.start_server() # Create namespace and properties self.load_data(create_properties=True) # Now ensure you are able to delete the property path = '/v2/metadefs/namespaces/%s/properties/%s' % ( NAME_SPACE1['namespace'], PROPERTY1['name']) resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that property is deleted path = "/v2/metadefs/namespaces/%s/properties/%s" % ( NAME_SPACE1['namespace'], PROPERTY1['name']) resp = self.api_get(path) self.assertEqual(404, resp.status_code) # Now disable remove_metadef_property permissions and make sure # any other attempts fail path = '/v2/metadefs/namespaces/%s/properties/%s' % ( NAME_SPACE1['namespace'], PROPERTY2['name']) self.set_policy_rules({ 'remove_metadef_property': '!', 'get_metadef_namespace': '' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'remove_metadef_property': '!', 'get_metadef_namespace': '!' }) resp = self.api_delete(path) # Note for reviewers, this causes our "check get if get fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'remove_metadef_property': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_resourcetype_api_policy.py0000664000175000017500000002405200000000000030204 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import oslo_policy.policy from glance.api import policy from glance.tests import functional NAME_SPACE1 = { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description" } RESOURCETYPE_1 = { "name": "MyResourceType", "prefix": "prefix_", "properties_target": "temp" } RESOURCETYPE_2 = { "name": "MySecondResourceType", "prefix": "temp_prefix_", "properties_target": "temp_2" } class TestMetadefResourceTypesPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestMetadefResourceTypesPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def load_data(self, create_resourcetypes=False): path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) if create_resourcetypes: namespace = md_resource['namespace'] path = '/v2/metadefs/namespaces/%s/resource_types' % namespace for resource in [RESOURCETYPE_1, RESOURCETYPE_2]: md_resource = self._create_metadef_resource(path=path, data=resource) self.assertEqual(resource['name'], md_resource['name']) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestMetadefResourceTypesPolicy, self).start_server() def _verify_forbidden_converted_to_not_found(self, path, method, json=None): # Note for other reviewers, these tests runs by default using # admin role, to test this scenario we need private namespace # of current project to be accessed by other projects non-admin # user. 
headers = self._headers({ 'X-Tenant-Id': 'fake-tenant-id', 'X-Roles': 'member', }) resp = self.api_request(method, path, headers=headers, json=json) self.assertEqual(404, resp.status_code) def test_namespace_resourcetypes_list_basic(self): self.start_server() # Create namespace and resourcetypes self.load_data(create_resourcetypes=True) # First make sure list resourcetypes works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/resource_types' % namespace resp = self.api_get(path) md_resource = resp.json self.assertEqual(2, len(md_resource['resource_type_associations'])) # Now disable list_metadef_resource_types permissions and make # sure any other attempts fail self.set_policy_rules({ 'list_metadef_resource_types': '!', 'get_metadef_namespace': '@', }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'list_metadef_resource_types': '!', 'get_metadef_namespace': '!', }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if list fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Now enable list_metadef_resource_types and disable # get_metadef_resource_type permission to make sure that you will get # empty list as a response self.set_policy_rules({ 'list_metadef_resource_types': '@', 'get_metadef_resource_type': '!', 'get_metadef_namespace': '@', }) resp = self.api_get(path) md_resource = resp.json self.assertEqual(0, len(md_resource['resource_type_associations'])) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'list_metadef_resource_types': '@', 'get_metadef_resource_type': '@', 'get_metadef_namespace': '@', }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_resourcetypes_list_basic(self): self.start_server() # Create namespace and resourcetypes self.load_data(create_resourcetypes=True) # First make sure list resourcetypes works with default policy path = '/v2/metadefs/resource_types' resp = self.api_get(path) md_resource = resp.json # NOTE(abhishekk): /v2/metadefs/resource_types returns list which # contains all resource_types in a dictionary, so the length will # always be 1 here. 
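        # For example, the body is shaped roughly like (illustrative):
        #     {"resource_types": [{"name": "MyResourceType", ...},
        #                         {"name": "MySecondResourceType", ...}]}
        # i.e. a single top-level key, hence len(md_resource) == 1.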
self.assertEqual(1, len(md_resource)) # Now disable get_metadef_resource_type permissions and make # sure any other attempts fail self.set_policy_rules({ 'list_metadef_resource_types': '!', 'get_metadef_namespace': '@', }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) def test_resourcetype_create_basic(self): self.start_server() # Create namespace self.load_data() # First make sure create resourcetype works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/resource_types' % namespace md_resource = self._create_metadef_resource(path=path, data=RESOURCETYPE_1) self.assertEqual('MyResourceType', md_resource['name']) # Now disable add_metadef_resource_type_association permissions # and make sure any other attempts fail self.set_policy_rules({ 'add_metadef_resource_type_association': '!', 'get_metadef_namespace': '@', }) resp = self.api_post(path, json=RESOURCETYPE_2) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'add_metadef_resource_type_association': '!', 'get_metadef_namespace': '!', }) resp = self.api_post(path, json=RESOURCETYPE_2) # Note for reviewers, this causes our "check get if create fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'add_metadef_resource_type_association': '@', 'get_metadef_namespace': '@', }) self._verify_forbidden_converted_to_not_found(path, 'POST', json=RESOURCETYPE_2) def test_object_delete_basic(self): self.start_server() # Create namespace and objects self.load_data(create_resourcetypes=True) # Now ensure you are able to delete the resource_types path = '/v2/metadefs/namespaces/%s/resource_types/%s' % ( NAME_SPACE1['namespace'], RESOURCETYPE_1['name']) resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that resource_type is deleted namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/resource_types' % namespace resp = self.api_get(path) md_resource = resp.json # assert namespace has only one resource type association self.assertEqual(1, len(md_resource['resource_type_associations'])) # assert deleted association is not present in response for resource in md_resource['resource_type_associations']: self.assertNotEqual(RESOURCETYPE_1['name'], resource['name']) # Now disable remove_metadef_resource_type_association permissions # and make sure any other attempts fail path = '/v2/metadefs/namespaces/%s/resource_types/%s' % ( NAME_SPACE1['namespace'], RESOURCETYPE_2['name']) self.set_policy_rules({ 'remove_metadef_resource_type_association': '!', 'get_metadef_namespace': '@', }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'remove_metadef_resource_type_association': '!', 'get_metadef_namespace': '!', }) resp = self.api_delete(path) # Note for reviewers, this causes our "check get if delete fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'remove_metadef_resource_type_association': '@', 'get_metadef_namespace': '@', }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_resourcetypes.py0000664000175000017500000002641400000000000026163 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http from oslo_serialization import jsonutils import requests from glance.tests.functional.v2 import metadef_base class TestMetadefResourceTypes(metadef_base.MetadefFunctionalTestBase): def setUp(self): super(TestMetadefResourceTypes, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def test_metadef_resource_types_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner" }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Resource type should not exist path = self._url('/v2/metadefs/namespaces/%s/resource_types' % (namespace_name)) response = requests.get(path, headers=self._headers()) metadef_resource_type = jsonutils.loads(response.text) self.assertEqual( 0, len(metadef_resource_type['resource_type_associations'])) # Create a resource type path = self._url('/v2/metadefs/namespaces/MyNamespace/resource_types') headers = self._headers({'content-type': 'application/json'}) metadef_resource_type_name = "resource_type1" data = jsonutils.dumps( { "name": "resource_type1", "prefix": "hw_", "properties_target": "image", } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code) # Get the metadef resource type created above path = self._url('/v2/metadefs/namespaces/%s/resource_types' % (namespace_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) metadef_resource_type = jsonutils.loads(response.text) self.assertEqual( "resource_type1", metadef_resource_type['resource_type_associations'][0]['name']) # Returned resource type should match the created 
resource type resource_type = jsonutils.loads(response.text) checked_keys = set([ u'name', u'prefix', u'properties_target', u'created_at', u'updated_at' ]) self.assertEqual( set(resource_type['resource_type_associations'][0].keys()), checked_keys) expected_metadef_resource_types = { "name": metadef_resource_type_name, "prefix": "hw_", "properties_target": "image", } # Simple key values checked_values = set([ u'name', u'prefix', u'properties_target', ]) for key, value in expected_metadef_resource_types.items(): if key in checked_values: self.assertEqual( resource_type['resource_type_associations'][0][key], value, key) # Deassociate of metadef resource type resource_type1 path = self._url('/v2/metadefs/namespaces/%s/resource_types/%s' % (namespace_name, metadef_resource_type_name)) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # resource_type1 should not exist path = self._url('/v2/metadefs/namespaces/%s/resource_types' % (namespace_name)) response = requests.get(path, headers=self._headers()) metadef_resource_type = jsonutils.loads(response.text) self.assertEqual( 0, len(metadef_resource_type['resource_type_associations'])) def _create_resource_type(self, namespaces): resource_types = [] for namespace in namespaces: headers = self._headers({'X-Tenant-Id': namespace['owner']}) data = { "name": "resource_type_of_%s" % (namespace['namespace']), "prefix": "hw_", "properties_target": "image" } path = self._url('/v2/metadefs/namespaces/%s/resource_types' % (namespace['namespace'])) response = requests.post(path, headers=headers, json=data) self.assertEqual(http.CREATED, response.status_code) rs_type = response.json() resource_type = dict() resource_type[namespace['namespace']] = rs_type['name'] resource_types.append(resource_type) return resource_types def test_role_base_metadef_resource_types_lifecycle(self): # Create public and private namespaces for tenant1 and tenant2 path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) tenant1_namespaces = [] tenant2_namespaces = [] for tenant in [self.tenant1, self.tenant2]: headers['X-Tenant-Id'] = tenant for visibility in ['public', 'private']: namespace_data = { "namespace": "%s_%s_namespace" % (tenant, visibility), "display_name": "My User Friendly Namespace", "description": "My description", "visibility": visibility, "owner": tenant } namespace = self.create_namespace(path, headers, namespace_data) self.assertNamespacesEqual(namespace, namespace_data) if tenant == self.tenant1: tenant1_namespaces.append(namespace) else: tenant2_namespaces.append(namespace) # Create a resource type for each namespace created above tenant1_resource_types = self._create_resource_type( tenant1_namespaces) tenant2_resource_types = self._create_resource_type( tenant2_namespaces) def _check_resource_type_access(namespaces, tenant): headers = self._headers({'X-Tenant-Id': tenant, 'X-Roles': 'reader,member'}) for namespace in namespaces: path = self._url('/v2/metadefs/namespaces/%s/resource_types' % (namespace['namespace'])) response = requests.get(path, headers=headers) if namespace['visibility'] == 'public': self.assertEqual(http.OK, response.status_code) else: self.assertEqual(http.NOT_FOUND, response.status_code) def _check_resource_types(tenant, total_rs_types): # Resource types are visible across tenants for all users path = self._url('/v2/metadefs/resource_types') headers = self._headers({'X-Tenant-Id': tenant, 'X-Roles': 'reader,member'}) response = 
requests.get(path, headers=headers)
            self.assertEqual(http.OK, response.status_code)
            metadef_resource_type = response.json()
            # The resource types list count should be the same as the total
            # resource types created across the tenants.
            self.assertEqual(
                sorted(x['name']
                       for x in metadef_resource_type['resource_types']),
                sorted(value for x in total_rs_types
                       for key, value in x.items()))

        # Check Tenant 1 can access resource types of all public namespaces
        # and cannot access resource types of private namespaces of Tenant 2
        _check_resource_type_access(tenant2_namespaces, self.tenant1)
        # Check Tenant 2 can access public namespaces and cannot access
        # private namespaces of Tenant 1
        _check_resource_type_access(tenant1_namespaces, self.tenant2)

        # Resource types of all namespaces and tenants are listable by
        # non-admin roles
        total_resource_types = tenant1_resource_types + tenant2_resource_types
        _check_resource_types(self.tenant1, total_resource_types)
        _check_resource_types(self.tenant2, total_resource_types)

        # Disassociating a resource type should not be allowed for a
        # non-admin role
        for resource_type in total_resource_types:
            for namespace, rs_type in resource_type.items():
                path = \
                    self._url('/v2/metadefs/namespaces/%s/resource_types/%s'
                              % (namespace, rs_type))
                response = requests.delete(
                    path, headers=self._headers({
                        'X-Roles': 'reader,member',
                        'X-Tenant-Id': namespace.split('_')[0]
                    }))
                self.assertEqual(http.FORBIDDEN, response.status_code)

        # Disassociate all metadef resource types
        headers = self._headers()
        for resource_type in total_resource_types:
            for namespace, rs_type in resource_type.items():
                path = \
                    self._url('/v2/metadefs/namespaces/%s/resource_types/%s'
                              % (namespace, rs_type))
                response = requests.delete(path, headers=headers)
                self.assertEqual(http.NO_CONTENT, response.status_code)

        # Disassociated resource types should no longer exist. When the
        # specified resource type is not associated with the given
        # namespace, the API returns an empty list in the response instead
        # of raising a not-found error.
        path = self._url(
            '/v2/metadefs/namespaces/%s/resource_types' % namespace)
        response = requests.get(path, headers=headers)
        metadef_resource_type = response.json()
        self.assertEqual(
            [], metadef_resource_type['resource_type_associations'])
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/glance/tests/functional/v2/test_metadef_tag_api_policy.py0000664000175000017500000003477400000000000026232 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
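# NOTE: several metadef policy tests in these modules verify that a 403
# Forbidden raised for another tenant's private namespace is surfaced to
# the caller as 404 Not Found, so that the namespace's very existence is
# not leaked. A minimal, self-contained sketch of that pattern follows;
# the function and flag names are made up for the example and this helper
# is not used by the tests themselves.


def _forbidden_to_not_found_demo():
    import webob.exc

    def api_get_namespace(namespace_is_visible):
        try:
            if not namespace_is_visible:
                # The policy layer rejects access to the private namespace
                raise webob.exc.HTTPForbidden()
            return 200
        except webob.exc.HTTPForbidden:
            # The API converts the 403 into a 404 before returning it
            return webob.exc.HTTPNotFound().code

    # A namespace the caller may not see simply appears not to exist
    assert api_get_namespace(namespace_is_visible=False) == 404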
from unittest import mock import oslo_policy.policy from glance.api import policy from glance.tests import functional TAG1 = { "name": "MyTag" } TAG2 = { "name": "MySecondTag" } NAME_SPACE1 = { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description" } class TestMetadefTagsPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestMetadefTagsPolicy, self).setUp() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) def load_data(self, create_tags=False): path = '/v2/metadefs/namespaces' md_resource = self._create_metadef_resource(path=path, data=NAME_SPACE1) self.assertEqual('MyNamespace', md_resource['namespace']) if create_tags: namespace = md_resource['namespace'] for tag in [TAG1, TAG2]: path = '/v2/metadefs/namespaces/%s/tags/%s' % ( namespace, tag['name']) md_resource = self._create_metadef_resource(path=path) self.assertEqual(tag['name'], md_resource['name']) def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestMetadefTagsPolicy, self).start_server() def _verify_forbidden_converted_to_not_found(self, path, method, json=None): # Note for other reviewers, these tests runs by default using # admin role, to test this scenario we need private namespace # of current project to be accessed by other projects non-admin # user. headers = self._headers({ 'X-Tenant-Id': 'fake-tenant-id', 'X-Roles': 'member', }) resp = self.api_request(method, path, headers=headers, json=json) self.assertEqual(404, resp.status_code) def test_tag_create_basic(self): self.start_server() # Create namespace self.load_data() # First make sure create tag works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/tags/%s' % ( namespace, TAG1['name']) md_resource = self._create_metadef_resource(path=path) self.assertEqual('MyTag', md_resource['name']) # Now disable add_metadef_tag permissions and make sure any other # attempts fail self.set_policy_rules({ 'add_metadef_tag': '!', 'get_metadef_namespace': '' }) path = '/v2/metadefs/namespaces/%s/tags/%s' % ( namespace, TAG2['name']) resp = self.api_post(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'add_metadef_tag': '!', 'get_metadef_namespace': '!' }) resp = self.api_post(path) # Note for reviewers, this causes our "check get if add fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'add_metadef_tag': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'POST') def test_tags_create_basic(self): self.start_server() # Create namespace self.load_data() # First make sure create tags works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/tags' % namespace data = {"tags": [TAG1, TAG2]} md_resource = self._create_metadef_resource(path=path, data=data) self.assertEqual(2, len(md_resource['tags'])) # Now disable add_metadef_tags permissions and make sure any other # attempts fail self.set_policy_rules({ 'add_metadef_tags': '!', 'get_metadef_namespace': '' }) path = '/v2/metadefs/namespaces/%s/tags' % namespace data = { "tags": [{ "name": "sampe-tag-1" }, { "name": "sampe-tag-2" }] } resp = self.api_post(path, json=data) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'add_metadef_tags': '!', 'get_metadef_namespace': '!' }) resp = self.api_post(path, json=data) # Note for reviewers, this causes our "check get if add fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'add_metadef_tags': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'POST', json=data) def test_tag_list_basic(self): self.start_server() # Create namespace and tags self.load_data(create_tags=True) # First make sure list tag works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/tags' % namespace resp = self.api_get(path) md_resource = resp.json self.assertEqual(2, len(md_resource['tags'])) # Now disable get_metadef_tags permissions and make sure # any other attempts fail self.set_policy_rules({ 'get_metadef_tags': '!', 'get_metadef_namespace': '' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable bot permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_metadef_tags': '!', 'get_metadef_namespace': '!' }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if list fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_tags': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_tag_get_basic(self): self.start_server() # Create namespace and tags self.load_data(create_tags=True) # First make sure get tag works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/tags/%s' % ( namespace, TAG1['name']) resp = self.api_get(path) md_resource = resp.json self.assertEqual('MyTag', md_resource['name']) # Now disable get_metadef_tag permissions and make sure # any other attempts fail self.set_policy_rules({ 'get_metadef_tag': '!', 'get_metadef_namespace': '' }) resp = self.api_get(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'get_metadef_tag': '!', 'get_metadef_namespace': '!' }) resp = self.api_get(path) # Note for reviewers, this causes our "check get if get fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_tag': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'GET') def test_tag_update_basic(self): self.start_server() # Create namespace and tags self.load_data(create_tags=True) # First make sure modify tag works with default policy namespace = NAME_SPACE1['namespace'] path = '/v2/metadefs/namespaces/%s/tags/%s' % ( namespace, TAG1['name']) data = { 'name': "MyTagUpdated" } resp = self.api_put(path, json=data) md_resource = resp.json self.assertEqual('MyTagUpdated', md_resource['name']) # Now disable modify_metadef_tag permissions and make sure # any other attempts fail self.set_policy_rules({ 'modify_metadef_tag': '!', 'get_metadef_namespace': '' }) path = '/v2/metadefs/namespaces/%s/tags/%s' % ( namespace, TAG2['name']) data = { 'name': "MySecondTagUpdated" } resp = self.api_put(path, json=data) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'modify_metadef_tag': '!', 'get_metadef_namespace': '!' }) resp = self.api_put(path, json=data) # Note for reviewers, this causes our "check get if modify fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. 
self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'get_metadef_tag': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'PUT', json=data) def test_tag_delete_basic(self): self.start_server() # Create namespace and tags self.load_data(create_tags=True) # Now ensure you are able to delete the tag path = '/v2/metadefs/namespaces/%s/tags/%s' % ( NAME_SPACE1['namespace'], TAG1['name']) resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that property is deleted path = "/v2/metadefs/namespaces/%s/tags/%s" % ( NAME_SPACE1['namespace'], TAG1['name']) resp = self.api_get(path) self.assertEqual(404, resp.status_code) # Now disable delete_metadef_tag permissions and make sure # any other attempts fail path = '/v2/metadefs/namespaces/%s/tags/%s' % ( NAME_SPACE1['namespace'], TAG2['name']) self.set_policy_rules({ 'delete_metadef_tag': '!', 'get_metadef_namespace': '' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'delete_metadef_tag': '!', 'get_metadef_namespace': '!' }) resp = self.api_delete(path) # Note for reviewers, this causes our "check get if delete fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_tag': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') def test_tags_delete_basic(self): self.start_server() # Create namespace and tags self.load_data(create_tags=True) # Now ensure you are able to delete all the tags path = '/v2/metadefs/namespaces/%s/tags' % NAME_SPACE1['namespace'] resp = self.api_delete(path) self.assertEqual(204, resp.status_code) # Verify that tags are deleted path = "/v2/metadefs/namespaces/%s/tags" % NAME_SPACE1['namespace'] resp = self.api_get(path) md_resource = resp.json self.assertEqual(0, len(md_resource['tags'])) # Now disable delete_metadef_tags permissions and make sure # any other attempts fail path = "/v2/metadefs/namespaces/%s/tags" % NAME_SPACE1['namespace'] self.set_policy_rules({ 'delete_metadef_tags': '!', 'get_metadef_namespace': '' }) resp = self.api_delete(path) self.assertEqual(403, resp.status_code) # Now disable both permissions and make sure you will get # 404 Not Found self.set_policy_rules({ 'delete_metadef_tags': '!', 'get_metadef_namespace': '!' }) resp = self.api_delete(path) # Note for reviewers, this causes our "check get if delete fails" # logic to return 404 as we expect, but not related to the latest # rev that checks the namespace get operation first. self.assertEqual(404, resp.status_code) # Ensure accessing non visible namespace will catch 403 and # return 404 to user self.set_policy_rules({ 'delete_metadef_tags': '', 'get_metadef_namespace': '' }) self._verify_forbidden_converted_to_not_found(path, 'DELETE') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_metadef_tags.py0000664000175000017500000004002000000000000024172 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import http.client as http from oslo_serialization import jsonutils import requests from glance.tests.functional.v2 import metadef_base class TestMetadefTags(metadef_base.MetadefFunctionalTestBase): def setUp(self): super(TestMetadefTags, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def test_metadata_tags_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner"} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Metadata tag should not exist metadata_tag_name = "tag1" path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create the metadata tag headers = self._headers({'content-type': 'application/json'}) response = requests.post(path, headers=headers) self.assertEqual(http.CREATED, response.status_code) # Get the metadata tag created above response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) metadata_tag = jsonutils.loads(response.text) self.assertEqual(metadata_tag_name, metadata_tag['name']) # Returned tag should match the created tag metadata_tag = jsonutils.loads(response.text) checked_keys = set([ 'name', 'created_at', 'updated_at' ]) self.assertEqual(checked_keys, set(metadata_tag.keys())) expected_metadata_tag = { "name": metadata_tag_name } # Simple key values checked_values = set([ 'name' ]) for key, value in expected_metadata_tag.items(): if key in checked_values: self.assertEqual(metadata_tag[key], value, key) # Try to create a duplicate metadata tag headers = self._headers({'content-type': 'application/json'}) response = requests.post(path, headers=headers) self.assertEqual(http.CONFLICT, response.status_code) # The metadata_tag should be mutable path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) media_type = 'application/json' headers = self._headers({'content-type': media_type}) metadata_tag_name = "tag1-UPDATED" data = jsonutils.dumps( { "name": metadata_tag_name } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(http.OK, response.status_code, response.text) # Returned metadata_tag should reflect the changes metadata_tag = jsonutils.loads(response.text) self.assertEqual('tag1-UPDATED', metadata_tag['name']) # Updates should persist 
across requests path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) self.assertEqual('tag1-UPDATED', metadata_tag['name']) # Deletion of metadata_tag_name path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.NO_CONTENT, response.status_code) # metadata_tag_name should not exist path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(http.NOT_FOUND, response.status_code) # Create multiple tags. path = self._url('/v2/metadefs/namespaces/%s/tags' % (namespace_name)) headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps( {"tags": [{"name": "tag1"}, {"name": "tag2"}, {"name": "tag3"}]} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # List out the three new tags. response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(3, len(tags)) # Attempt to create bogus duplicate tag4 data = jsonutils.dumps( {"tags": [{"name": "tag4"}, {"name": "tag5"}, {"name": "tag4"}]} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CONFLICT, response.status_code) # Verify the previous 3 still exist response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(3, len(tags)) # Create new tags and append to existing tags. path = self._url('/v2/metadefs/namespaces/%s/tags' % (namespace_name)) headers = self._headers({'content-type': 'application/json', 'X-Openstack-Append': 'True'}) data = jsonutils.dumps( {"tags": [{"name": "tag4"}, {"name": "tag5"}, {"name": "tag6"}]} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # List out all six tags. 
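        # (tag4, tag5 and tag6 were appended to the existing tag1-tag3 via
        # the 'X-Openstack-Append: True' header above, so all six should
        # now be present.)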
response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        tags = jsonutils.loads(response.text)['tags']
        self.assertEqual(6, len(tags))

        # Attempt to create duplicate of the existing tag6
        data = jsonutils.dumps(
            {"tags": [{"name": "tag6"}, {"name": "tag7"}, {"name": "tag8"}]}
        )
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(http.CONFLICT, response.status_code)

        # Verify the previous 6 still exist
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        tags = jsonutils.loads(response.text)['tags']
        self.assertEqual(6, len(tags))

    def _create_tags(self, namespaces):
        tags = []
        for namespace in namespaces:
            headers = self._headers({'X-Tenant-Id': namespace['owner']})
            tag_name = "tag_of_%s" % (namespace['namespace'])
            path = self._url('/v2/metadefs/namespaces/%s/tags/%s'
                             % (namespace['namespace'], tag_name))
            response = requests.post(path, headers=headers)
            self.assertEqual(http.CREATED, response.status_code)
            tag_metadata = response.json()
            metadef_tags = dict()
            metadef_tags[namespace['namespace']] = tag_metadata['name']
            tags.append(metadef_tags)
        return tags

    def _update_tags(self, path, headers, data):
        # The tag should be mutable
        response = requests.put(path, headers=headers, json=data)
        self.assertEqual(http.OK, response.status_code, response.text)

        # Returned metadata_tag should reflect the changes
        metadata_tag = response.json()
        self.assertEqual(data['name'], metadata_tag['name'])

        # Updates should persist across requests; re-read the tag instead
        # of asserting on the stale response object
        response = requests.get(path, headers=self._headers())
        self.assertEqual(http.OK, response.status_code)
        metadata_tag = response.json()
        self.assertEqual(data['name'], metadata_tag['name'])

    def test_role_base_metadata_tags_lifecycle(self):
        # Create public and private namespaces for tenant1 and tenant2
        path = self._url('/v2/metadefs/namespaces')
        headers = self._headers({'content-type': 'application/json'})
        tenant1_namespaces = []
        tenant2_namespaces = []
        for tenant in [self.tenant1, self.tenant2]:
            headers['X-Tenant-Id'] = tenant
            for visibility in ['public', 'private']:
                namespace_data = {
                    "namespace": "%s_%s_namespace" % (tenant, visibility),
                    "display_name": "My User Friendly Namespace",
                    "description": "My description",
                    "visibility": visibility,
                    "owner": tenant
                }
                namespace = self.create_namespace(path, headers,
                                                  namespace_data)
                self.assertNamespacesEqual(namespace, namespace_data)
                if tenant == self.tenant1:
                    tenant1_namespaces.append(namespace)
                else:
                    tenant2_namespaces.append(namespace)

        # Create a metadef tag for each namespace created above
        tenant1_tags = self._create_tags(tenant1_namespaces)
        tenant2_tags = self._create_tags(tenant2_namespaces)

        def _check_tag_access(tags, tenant):
            headers = self._headers({'content-type': 'application/json',
                                     'X-Tenant-Id': tenant,
                                     'X-Roles': 'reader,member'})
            for tag in tags:
                for namespace, tag_name in tag.items():
                    path = self._url('/v2/metadefs/namespaces/%s/tags/%s'
                                     % (namespace, tag_name))
                    response = requests.get(path, headers=headers)
                    if namespace.split('_')[1] == 'public':
                        expected = http.OK
                    else:
                        expected = http.NOT_FOUND
                    # Make sure we can see all public and our own private
                    # tags, but not the private tags of the other tenant
                    self.assertEqual(expected, response.status_code)

                    # Make sure the same holds for listing
                    path = self._url(
                        '/v2/metadefs/namespaces/%s/tags' % namespace)
                    response = requests.get(path, headers=headers)
                    self.assertEqual(expected, response.status_code)
                    if expected == http.OK:
                        resp_props = response.json()['tags']
                        self.assertEqual(
                            sorted(tag.values()),
                            sorted([x['name'] for x in resp_props]))

        # Check Tenant 1 can access tags of all public namespaces
        # and cannot access tags of private namespaces of Tenant 2
        _check_tag_access(tenant2_tags, self.tenant1)
        # Check Tenant 2 can access tags of public namespaces and
        # cannot access tags of private namespaces of Tenant 1
        _check_tag_access(tenant1_tags, self.tenant2)

        # Update tags with admin and non-admin roles
        total_tags = tenant1_tags + tenant2_tags
        for tag in total_tags:
            for namespace, tag_name in tag.items():
                data = {
                    "name": tag_name}
                path = self._url('/v2/metadefs/namespaces/%s/tags/%s'
                                 % (namespace, tag_name))

                # Update tag should fail with a non-admin role
                headers['X-Roles'] = "reader,member"
                response = requests.put(path, headers=headers, json=data)
                self.assertEqual(http.FORBIDDEN, response.status_code)

                # Should work with admin role
                headers = self._headers({
                    'X-Tenant-Id': namespace.split('_')[0]})
                self._update_tags(path, headers, data)

        # Deleting tags should not be allowed for a non-admin role
        for tag in total_tags:
            for namespace, tag_name in tag.items():
                path = self._url('/v2/metadefs/namespaces/%s/tags/%s'
                                 % (namespace, tag_name))
                response = requests.delete(
                    path, headers=self._headers({
                        'X-Roles': 'reader,member',
                        'X-Tenant-Id': namespace.split('_')[0]
                    }))
                self.assertEqual(http.FORBIDDEN, response.status_code)

        # Delete all metadef tags
        headers = self._headers()
        for tag in total_tags:
            for namespace, tag_name in tag.items():
                path = self._url('/v2/metadefs/namespaces/%s/tags/%s'
                                 % (namespace, tag_name))
                response = requests.delete(path, headers=headers)
                self.assertEqual(http.NO_CONTENT, response.status_code)

                # Deleted tags should no longer exist
                response = requests.get(path, headers=headers)
                self.assertEqual(http.NOT_FOUND, response.status_code)

        # Creating multiple tags should not be allowed for a non-admin role
        headers = self._headers({'content-type': 'application/json',
                                 'X-Roles': 'reader,member'})
        data = {
            "tags": [{"name": "tag1"}, {"name": "tag2"}, {"name": "tag3"}]
        }
        for namespace in tenant1_namespaces:
            path = self._url('/v2/metadefs/namespaces/%s/tags'
                             % (namespace['namespace']))
            response = requests.post(path, headers=headers, json=data)
            self.assertEqual(http.FORBIDDEN, response.status_code)

        # Create multiple tags.
        headers = self._headers({'content-type': 'application/json'})
        for namespace in tenant1_namespaces:
            path = self._url('/v2/metadefs/namespaces/%s/tags'
                             % (namespace['namespace']))
            response = requests.post(path, headers=headers, json=data)
            self.assertEqual(http.CREATED, response.status_code)

        # Deleting multiple tags should not be allowed for a non-admin role
        headers = self._headers({'content-type': 'application/json',
                                 'X-Roles': 'reader,member'})
        for namespace in tenant1_namespaces:
            path = self._url('/v2/metadefs/namespaces/%s/tags'
                             % (namespace['namespace']))
            response = requests.delete(path, headers=headers)
            self.assertEqual(http.FORBIDDEN, response.status_code)

        # Delete the multiple tags created above
        headers = self._headers()
        for namespace in tenant1_namespaces:
            path = self._url('/v2/metadefs/namespaces/%s/tags'
                             % (namespace['namespace']))
            response = requests.delete(path, headers=headers)
            self.assertEqual(http.NO_CONTENT, response.status_code)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/glance/tests/functional/v2/test_schemas.py0000664000175000017500000000457700000000000023203 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client as http from oslo_serialization import jsonutils import requests from glance.tests import functional class TestSchemas(functional.FunctionalTest): def setUp(self): super(TestSchemas, self).setUp() self.cleanup() self.start_servers(**self.__dict__.copy()) def test_resource(self): # Ensure the image link works and custom properties are loaded path = 'http://%s:%d/v2/schemas/image' % ('127.0.0.1', self.api_port) response = requests.get(path) self.assertEqual(http.OK, response.status_code) image_schema = jsonutils.loads(response.text) expected = set([ 'id', 'name', 'visibility', 'checksum', 'os_hash_algo', 'os_hash_value', 'created_at', 'updated_at', 'tags', 'size', 'virtual_size', 'owner', 'container_format', 'disk_format', 'self', 'file', 'status', 'schema', 'direct_url', 'locations', 'min_ram', 'min_disk', 'protected', 'os_hidden', 'stores', ]) self.assertEqual(expected, set(image_schema['properties'].keys())) # Ensure the images link works and agrees with the image schema path = 'http://%s:%d/v2/schemas/images' % ('127.0.0.1', self.api_port) response = requests.get(path) self.assertEqual(http.OK, response.status_code) images_schema = jsonutils.loads(response.text) item_schema = images_schema['properties']['images']['items'] self.assertEqual(item_schema, image_schema) self.stop_servers() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_tasks.py0000664000175000017500000001145200000000000022703 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client as http import uuid from oslo_serialization import jsonutils import requests from glance.tests import functional TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) TENANT3 = str(uuid.uuid4()) TENANT4 = str(uuid.uuid4()) class TestTasks(functional.FunctionalTest): def setUp(self): super(TestTasks, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def test_task_not_allowed_non_admin(self): self.start_servers(**self.__dict__.copy()) roles = {'X-Roles': 'member'} # Task list should be empty path = self._url('/v2/tasks') response = requests.get(path, headers=self._headers(roles)) self.assertEqual(http.FORBIDDEN, response.status_code) def test_task_lifecycle(self): self.start_servers(**self.__dict__.copy()) # Task list should be empty path = self._url('/v2/tasks') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tasks = jsonutils.loads(response.text)['tasks'] self.assertEqual(0, len(tasks)) # Create a task path = self._url('/v2/tasks') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({ "type": "import", "input": { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' } } }) response = requests.post(path, headers=headers, data=data) self.assertEqual(http.CREATED, response.status_code) # Returned task entity should have a generated id and status task = jsonutils.loads(response.text) task_id = task['id'] self.assertIn('Location', response.headers) self.assertEqual(path + '/' + task_id, response.headers['Location']) checked_keys = set(['created_at', 'id', 'input', 'message', 'owner', 'schema', 'self', 'status', 'type', 'result', 'updated_at', 'request_id', 'user_id' ]) self.assertEqual(checked_keys, set(task.keys())) expected_task = { 'status': 'pending', 'type': 'import', 'input': { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' }}, 'schema': '/v2/schemas/task', } for key, value in expected_task.items(): self.assertEqual(value, task[key], key) # Tasks list should now have one entry path = self._url('/v2/tasks') response = requests.get(path, headers=self._headers()) self.assertEqual(http.OK, response.status_code) tasks = jsonutils.loads(response.text)['tasks'] self.assertEqual(1, len(tasks)) self.assertEqual(task_id, tasks[0]['id']) # Attempt to delete a task path = self._url('/v2/tasks/%s' % tasks[0]['id']) response = requests.delete(path, headers=self._headers()) self.assertEqual(http.METHOD_NOT_ALLOWED, response.status_code) self.assertIsNotNone(response.headers.get('Allow')) self.assertEqual('GET', response.headers.get('Allow')) self.stop_servers() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/functional/v2/test_tasks_api_policy.py0000664000175000017500000000763300000000000025121 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import oslo_policy.policy from oslo_serialization import jsonutils from glance.api import policy from glance.tests import functional TASK1 = { "type": "import", "input": { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' } } } TASK2 = { "type": "api_image_import", "input": { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' } } } class TestTasksPolicy(functional.SynchronousAPIBase): def setUp(self): super(TestTasksPolicy, self).setUp() self.policy = policy.Enforcer() def set_policy_rules(self, rules): self.policy.set_rules( oslo_policy.policy.Rules.from_dict(rules), overwrite=True) def start_server(self): with mock.patch.object(policy, 'Enforcer') as mock_enf: mock_enf.return_value = self.policy super(TestTasksPolicy, self).start_server() def _create_task(self, path=None, data=None, expected_code=201): if not path: path = "/v2/tasks" resp = self.api_post(path, json=data) task = jsonutils.loads(resp.text) self.assertEqual(expected_code, resp.status_code) return task def load_data(self): tasks = [] for task in [TASK1, TASK2]: resp = self._create_task(data=task) tasks.append(resp['id']) self.assertEqual(task['type'], resp['type']) return tasks def test_tasks_create_basic(self): self.start_server() # First make sure create tasks works with default policy path = '/v2/tasks' task = self._create_task(path=path, data=TASK1) self.assertEqual('import', task['type']) # Now disable tasks_api_access permissions and make sure any other # attempts fail self.set_policy_rules({'tasks_api_access': '!'}) resp = self.api_post(path, json=TASK2) self.assertEqual(403, resp.status_code) def test_tasks_index_basic(self): self.start_server() # First make sure get tasks works with default policy tasks = self.load_data() path = '/v2/tasks' output = self.api_get(path).json self.assertEqual(len(tasks), len(output['tasks'])) # Now disable tasks_api_access permissions and make sure any other # attempts fail self.set_policy_rules({'tasks_api_access': '!'}) resp = self.api_get(path) self.assertEqual(403, resp.status_code) def test_tasks_get_basic(self): self.start_server() # First make sure get task works with default policy tasks = self.load_data() path = '/v2/tasks/%s' % tasks[0] task = self.api_get(path).json self.assertEqual('import', task['type']) # Now disable tasks_api_access permissions and make sure any other # attempts fail self.set_policy_rules({'tasks_api_access': '!'}) path = '/v2/tasks/%s' % tasks[1] resp = self.api_get(path) self.assertEqual(403, resp.status_code) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8943074 glance-29.0.0/glance/tests/integration/0000775000175000017500000000000000000000000017774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/integration/__init__.py0000664000175000017500000000000000000000000022073 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8943074 glance-29.0.0/glance/tests/integration/v2/0000775000175000017500000000000000000000000020323 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/integration/v2/__init__.py0000664000175000017500000000000000000000000022422 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/integration/v2/base.py0000664000175000017500000001404600000000000021614 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import os.path import shutil import tempfile import fixtures import glance_store from oslo_config import cfg from oslo_db import options import glance.common.client from glance.common import config import glance.db.sqlalchemy.api from glance import tests as glance_tests from glance.tests import utils as test_utils TESTING_API_PASTE_CONF = """ [composite:glance-api] paste.composite_factory = glance.api:root_app_factory /: api [pipeline: api] pipeline = versionnegotiation gzip unauthenticated-context rootapp [composite:glance-api-caching] paste.composite_factory = glance.api:root_app_factory /: api-caching [pipeline: api-caching] pipeline = versionnegotiation gzip unauthenticated-context cache rootapp [composite:glance-api-cachemanagement] paste.composite_factory = glance.api:root_app_factory /: api-cachemanagement [pipeline: api-cachemanagement] pipeline = versionnegotiation gzip unauthenticated-context cache cache_manage rootapp [composite:glance-api-fakeauth] paste.composite_factory = glance.api:root_app_factory /: api-fakeauth [pipeline: api-fakeauth] pipeline = versionnegotiation gzip fakeauth context rootapp [composite:glance-api-noauth] paste.composite_factory = glance.api:root_app_factory /: api-noauth [pipeline: api-noauth] pipeline = versionnegotiation gzip context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cache_manage] paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] 
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ CONF = cfg.CONF class ApiTest(test_utils.BaseTestCase): def setUp(self): super(ApiTest, self).setUp() self.test_dir = self.useFixture(fixtures.TempDir()).path self._configure_logging() self._setup_database() self._setup_stores() self._setup_property_protection() self.glance_api_app = self._load_paste_app( 'glance-api', flavor=getattr(self, 'api_flavor', ''), conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF), ) self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app) def _setup_property_protection(self): self._copy_data_file('property-protections.conf', self.test_dir) self.property_file = os.path.join(self.test_dir, 'property-protections.conf') def _configure_logging(self): self.config(default_log_levels=[ 'amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'suds=INFO', 'keystone=INFO', 'eventlet.wsgi.server=DEBUG' ]) def _setup_database(self): sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir options.set_defaults(CONF, connection=sql_connection) glance.db.sqlalchemy.api.clear_db_env() glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' if glance_db_env in os.environ: # use the empty db created and cached as a tempfile # instead of spending the time creating a new one db_location = os.environ[glance_db_env] shutil.copyfile(db_location, "%s/tests.sqlite" % self.test_dir) else: test_utils.db_sync() # copy the clean db to a temp location so that it # can be reused for future tests (osf, db_location) = tempfile.mkstemp() os.close(osf) shutil.copyfile('%s/tests.sqlite' % self.test_dir, db_location) os.environ[glance_db_env] = db_location # cleanup the temp file when the test suite is # complete def _delete_cached_db(): try: os.remove(os.environ[glance_db_env]) except Exception: glance_tests.logger.exception( "Error cleaning up the file %s" % os.environ[glance_db_env]) atexit.register(_delete_cached_db) def _setup_stores(self): glance_store.register_opts(CONF) image_dir = os.path.join(self.test_dir, "images") self.config(group='glance_store', filesystem_store_datadir=image_dir) glance_store.create_stores() def _load_paste_app(self, name, flavor, conf): conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name) with open(conf_file_path, 'w') as conf_file: conf_file.write(conf) conf_file.flush() return config.load_paste_app(name, flavor=flavor, conf_file=conf_file_path) def tearDown(self): glance.db.sqlalchemy.api.clear_db_env() super(ApiTest, self).tearDown() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/integration/v2/test_property_quota_violations.py0000664000175000017500000001244000000000000027301 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
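Before the quota tests that follow, a minimal sketch (illustrative only, not glance code) of the JSON-patch semantics they rely on: 'add' and 'replace' set a property, 'remove' deletes it, and glance enforces image_property_quota against the final result of the whole request, which is why a temporary violation inside a single PATCH can still succeed.

def apply_patch(properties, operations):
    # Apply a list of RFC 6902-style operations, as used with the
    # application/openstack-images-v2.1-json-patch media type, to a
    # plain dict of image properties.
    result = dict(properties)
    for op in operations:
        key = op['path'].lstrip('/')
        if op['op'] in ('add', 'replace'):
            result[key] = op['value']
        elif op['op'] == 'remove':
            del result[key]
    return result


# e.g. apply_patch({'k_4': 'v_4'},
#                  [{'op': 'replace', 'path': '/k_4', 'value': 'v_4.new'}])
# returns {'k_4': 'v_4.new'}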
import http.client from oslo_config import cfg from oslo_serialization import jsonutils from glance.tests.integration.v2 import base CONF = cfg.CONF class TestPropertyQuotaViolations(base.ApiTest): def __init__(self, *args, **kwargs): super(TestPropertyQuotaViolations, self).__init__(*args, **kwargs) self.api_flavor = 'noauth' def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': "foo", 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def _get(self, image_id=""): path = ('/v2/images/%s' % image_id).rstrip('/') rsp, content = self.http.request(path, 'GET', headers=self._headers()) self.assertEqual(http.client.OK, rsp.status) content = jsonutils.loads(content) return content def _create_image(self, body): path = '/v2/images' headers = self._headers({'content-type': 'application/json'}) rsp, content = self.http.request(path, 'POST', headers=headers, body=jsonutils.dumps(body)) self.assertEqual(http.client.CREATED, rsp.status) return jsonutils.loads(content) def _patch(self, image_id, body, expected_status): path = '/v2/images/%s' % image_id media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) rsp, content = self.http.request(path, 'PATCH', headers=headers, body=jsonutils.dumps(body)) self.assertEqual(expected_status, rsp.status, content) return content def test_property_ops_when_quota_violated(self): # Image list must be empty to begin with image_list = self._get()['images'] self.assertEqual(0, len(image_list)) orig_property_quota = 10 CONF.set_override('image_property_quota', orig_property_quota) # Create an image (with deployer-defined properties) req_body = {'name': 'testimg', 'disk_format': 'aki', 'container_format': 'aki'} for i in range(orig_property_quota): req_body['k_%d' % i] = 'v_%d' % i image = self._create_image(req_body) image_id = image['id'] for i in range(orig_property_quota): self.assertEqual('v_%d' % i, image['k_%d' % i]) # Now reduce property quota. We should be allowed to modify/delete # existing properties (even if the result still exceeds property quota) # but not add new properties nor replace existing properties with new # properties (as long as we're over the quota) self.config(image_property_quota=2) patch_body = [{'op': 'replace', 'path': '/k_4', 'value': 'v_4.new'}] image = jsonutils.loads(self._patch(image_id, patch_body, http.client.OK)) self.assertEqual('v_4.new', image['k_4']) patch_body = [{'op': 'remove', 'path': '/k_7'}] image = jsonutils.loads(self._patch(image_id, patch_body, http.client.OK)) self.assertNotIn('k_7', image) patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}] self._patch(image_id, patch_body, http.client.REQUEST_ENTITY_TOO_LARGE) image = self._get(image_id) self.assertNotIn('k_100', image) patch_body = [ {'op': 'remove', 'path': '/k_5'}, {'op': 'add', 'path': '/k_100', 'value': 'v_100'}, ] self._patch(image_id, patch_body, http.client.REQUEST_ENTITY_TOO_LARGE) image = self._get(image_id) self.assertNotIn('k_100', image) self.assertIn('k_5', image) # temporary violations to property quota should be allowed as long as # it's within one PATCH request and the end result does not violate # quotas. 
patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}, {'op': 'add', 'path': '/k_99', 'value': 'v_99'}] to_rm = ['k_%d' % i for i in range(orig_property_quota) if i != 7] patch_body.extend([{'op': 'remove', 'path': '/%s' % k} for k in to_rm]) image = jsonutils.loads(self._patch(image_id, patch_body, http.client.OK)) self.assertEqual('v_99', image['k_99']) self.assertEqual('v_100', image['k_100']) for k in to_rm: self.assertNotIn(k, image) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/integration/v2/test_tasks_api.py0000664000175000017500000005050000000000000023712 0ustar00zuulzuul00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import eventlet from oslo_serialization import jsonutils as json from glance.api.v2 import tasks from glance.common import timeutils from glance.tests.integration.v2 import base TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' def minimal_task_headers(owner='tenant1'): headers = { 'X-Auth-Token': 'user1:%s:admin' % owner, 'Content-Type': 'application/json', } return headers def _new_task_fixture(**kwargs): task_data = { "type": "import", "input": { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' } } } task_data.update(kwargs) return task_data class TestTasksApi(base.ApiTest): def __init__(self, *args, **kwargs): super(TestTasksApi, self).__init__(*args, **kwargs) self.api_flavor = 'fakeauth' def _wait_on_task_execution(self, max_wait=5): """Wait until all the tasks have finished execution and are in state of success or failure. """ start = timeutils.utcnow() # wait for a maximum of max_wait seconds while timeutils.delta_seconds(start, timeutils.utcnow()) < max_wait: wait = False # Verify that no task is in status of pending or processing path = "/v2/tasks" res, content = self.http.request(path, 'GET', headers=minimal_task_headers()) content_dict = json.loads(content) self.assertEqual(http.client.OK, res.status) res_tasks = content_dict['tasks'] if len(res_tasks) != 0: for task in res_tasks: if task['status'] in ('pending', 'processing'): wait = True break if wait: # Bug #1541487: we must give time to the server to execute the # task, but the server is run in the same process as the # test. Use eventlet to yield control to the pending server # task.
eventlet.sleep(0.05) continue else: break def _post_new_task(self, **kwargs): task_owner = kwargs.get('owner') headers = minimal_task_headers(task_owner) task_data = _new_task_fixture() task_data['input']['import_from'] = "http://example.com" body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request(path, 'POST', headers=headers, body=body_content) self.assertEqual(http.client.CREATED, response.status) task = json.loads(content) task_id = task['id'] self.assertIsNotNone(task_id) self.assertEqual(task_owner, task['owner']) self.assertEqual(task_data['type'], task['type']) self.assertEqual(task_data['input'], task['input']) self.assertEqual("http://localhost" + path + "/" + task_id, response.webob_resp.headers['Location']) return task, task_data def test_all_task_api(self): # 0. GET /tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) content_dict = json.loads(content) self.assertEqual(http.client.OK, response.status) self.assertFalse(content_dict['tasks']) # 1. GET /tasks/{task_id} # Verify non-existent task task_id = 'NON_EXISTENT_TASK' path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.NOT_FOUND, response.status) # 2. POST /tasks # Create a new task task_owner = 'tenant1' data, req_input = self._post_new_task(owner=task_owner) # 3. GET /tasks/{task_id} # Get an existing task task_id = data['id'] path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) # NOTE(sabari): wait for all task executions to finish before checking # task status. self._wait_on_task_execution(max_wait=10) # 4. GET /tasks # Get all tasks (not deleted) path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) self.assertIsNotNone(content) data = json.loads(content) self.assertIsNotNone(data) self.assertEqual(1, len(data['tasks'])) # NOTE(venkatesh) find a way to get expected_keys from tasks controller expected_keys = set(['id', 'expires_at', 'type', 'owner', 'status', 'created_at', 'updated_at', 'self', 'schema']) task = data['tasks'][0] self.assertEqual(expected_keys, set(task.keys())) self.assertEqual(req_input['type'], task['type']) self.assertEqual(task_owner, task['owner']) self.assertEqual('success', task['status']) self.assertIsNotNone(task['created_at']) self.assertIsNotNone(task['updated_at']) def test_task_schema_api(self): # 0. GET /schemas/task # Verify schema for task path = "/v2/schemas/task" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) schema = tasks.get_task_schema() expected_schema = schema.minimal() data = json.loads(content) self.assertIsNotNone(data) self.assertEqual(expected_schema, data) # 1. 
GET /schemas/tasks # Verify schema for tasks path = "/v2/schemas/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) schema = tasks.get_collection_schema() expected_schema = schema.minimal() data = json.loads(content) self.assertIsNotNone(data) self.assertEqual(expected_schema, data) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_create_new_task(self): # 0. POST /tasks # Create a new task with valid input and type task_data = _new_task_fixture() task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(http.client.CREATED, response.status) data = json.loads(content) task_id = data['id'] self.assertIsNotNone(task_id) self.assertEqual(task_owner, data['owner']) self.assertEqual(task_data['type'], data['type']) self.assertEqual(task_data['input'], data['input']) # 1. POST /tasks # Create a new task with invalid type # Expect BadRequest(400) Error as response task_data = _new_task_fixture(type='invalid') task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(http.client.BAD_REQUEST, response.status) # 2. POST /tasks # Create a new task with invalid input for type 'import' # Expect BadRequest(400) Error as response task_data = _new_task_fixture(task_input='{something: invalid}') task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(http.client.BAD_REQUEST, response.status) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_tasks_with_filter(self): # 0. GET /v2/tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) content_dict = json.loads(content) self.assertFalse(content_dict['tasks']) task_ids = [] # 1. Make 2 POST requests on /tasks with various attributes task_owner = TENANT1 data, req_input1 = self._post_new_task(owner=task_owner) task_ids.append(data['id']) task_owner = TENANT2 data, req_input2 = self._post_new_task(owner=task_owner) task_ids.append(data['id']) # 2. GET /tasks # Verify two import tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) content_dict = json.loads(content) self.assertEqual(2, len(content_dict['tasks'])) # 3. GET /tasks with owner filter # Verify correct task returned with owner params = "owner=%s" % TENANT1 path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) content_dict = json.loads(content) self.assertEqual(1, len(content_dict['tasks'])) self.assertEqual(TENANT1, content_dict['tasks'][0]['owner']) # Check the same for a different owner.
params = "owner=%s" % TENANT2 path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) content_dict = json.loads(content) self.assertEqual(1, len(content_dict['tasks'])) self.assertEqual(TENANT2, content_dict['tasks'][0]['owner']) # 4. GET /tasks with type filter # Verify correct task returned with type params = "type=import" path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) content_dict = json.loads(content) self.assertEqual(2, len(content_dict['tasks'])) actual_task_ids = [task['id'] for task in content_dict['tasks']] self.assertEqual(set(task_ids), set(actual_task_ids)) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_limited_tasks(self): """ Ensure marker and limit query params work """ # 0. GET /tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) tasks = json.loads(content) self.assertFalse(tasks['tasks']) task_ids = [] # 1. POST /tasks with three tasks with various attributes task, _ = self._post_new_task(owner=TENANT1) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT2) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT3) task_ids.append(task['id']) # 2. GET /tasks # Verify 3 tasks are returned path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) tasks = json.loads(content)['tasks'] self.assertEqual(3, len(tasks)) # 3. GET /tasks with limit of 2 # Verify only two tasks were returned params = "limit=2" path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(2, len(actual_tasks)) self.assertEqual(tasks[0]['id'], actual_tasks[0]['id']) self.assertEqual(tasks[1]['id'], actual_tasks[1]['id']) # 4. GET /tasks with marker # Verify only two tasks were returned params = "marker=%s" % tasks[0]['id'] path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(2, len(actual_tasks)) self.assertEqual(tasks[1]['id'], actual_tasks[0]['id']) self.assertEqual(tasks[2]['id'], actual_tasks[1]['id']) # 5. GET /tasks with marker and limit # Verify only one task was returned with the correct id params = "limit=1&marker=%s" % tasks[1]['id'] path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(1, len(actual_tasks)) self.assertEqual(tasks[2]['id'], actual_tasks[0]['id']) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_ordered_tasks(self): # 0. 
GET /tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) tasks = json.loads(content) self.assertFalse(tasks['tasks']) task_ids = [] # 1. POST /tasks with three tasks with various attributes task, _ = self._post_new_task(owner=TENANT1) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT2) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT3) task_ids.append(task['id']) # 2. GET /tasks with no query params # Verify three tasks returned, sorted by created_at desc path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(3, len(actual_tasks)) self.assertEqual(task_ids[2], actual_tasks[0]['id']) self.assertEqual(task_ids[1], actual_tasks[1]['id']) self.assertEqual(task_ids[0], actual_tasks[2]['id']) # 3. GET /tasks sorted by owner asc params = 'sort_key=owner&sort_dir=asc' path = '/v2/tasks?%s' % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) expected_task_owners = [TENANT1, TENANT2, TENANT3] expected_task_owners.sort() actual_tasks = json.loads(content)['tasks'] self.assertEqual(3, len(actual_tasks)) self.assertEqual(expected_task_owners, [t['owner'] for t in actual_tasks]) # 4. GET /tasks sorted by owner desc with a marker params = 'sort_key=owner&sort_dir=desc&marker=%s' % task_ids[0] path = '/v2/tasks?%s' % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(2, len(actual_tasks)) self.assertEqual(task_ids[2], actual_tasks[0]['id']) self.assertEqual(task_ids[1], actual_tasks[1]['id']) self.assertEqual(TENANT3, actual_tasks[0]['owner']) self.assertEqual(TENANT2, actual_tasks[1]['owner']) # 5. GET /tasks sorted by owner asc with a marker params = 'sort_key=owner&sort_dir=asc&marker=%s' % task_ids[0] path = '/v2/tasks?%s' % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(0, len(actual_tasks)) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_delete_task(self): # 0. POST /tasks # Create a new task with valid input and type task_data = _new_task_fixture() task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(http.client.CREATED, response.status) data = json.loads(content) task_id = data['id'] # 1. DELETE on /tasks/{task_id} # Attempt to delete a task path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'DELETE', headers=minimal_task_headers()) self.assertEqual(http.client.METHOD_NOT_ALLOWED, response.status) self.assertEqual('GET', response.webob_resp.headers.get('Allow')) self.assertEqual(('GET',), response.webob_resp.allow) self.assertEqual(('GET',), response.allow) # 2.
GET /tasks/{task_id} # Ensure that methods mentioned in the Allow header work path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(http.client.OK, response.status) self.assertIsNotNone(content) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/stubs.py0000664000175000017500000000550300000000000017166 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Stubouts, mocks and fixtures for the test suite""" import routes import webob from glance.api.middleware import context from glance.api.v2 import router def stub_out_store_server(stubs, base_dir, **kwargs): """Mocks calls to 127.0.0.1 on 9292 for testing. Done so that a real Glance server does not need to be up and running """ class FakeSocket(object): def __init__(self, *args, **kwargs): pass def fileno(self): return 42 class FakeGlanceConnection(object): def __init__(self, *args, **kwargs): self.sock = FakeSocket() def connect(self): return True def close(self): return True def putrequest(self, method, url): self.req = webob.Request.blank(url) self.req.method = method def putheader(self, key, value): self.req.headers[key] = value def endheaders(self): hl = [i.lower() for i in self.req.headers.keys()] assert not ('content-length' in hl and 'transfer-encoding' in hl), ( 'Content-Length and Transfer-Encoding are mutually exclusive') def send(self, data): # send() is called during chunked-transfer encoding, and # data is of the form %x\r\n%s\r\n. Strip off the %x and # only write the actual data in tests. self.req.body += data.split("\r\n")[1] def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank(url) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() api = context.UnauthenticatedContextMiddleware(router.API(mapper)) res = self.req.get_response(api) # httplib.Response has a read() method...fake it out def fake_reader(): return res.body setattr(res, 'read', fake_reader) return res def fake_image_iter(self): for i in self.source.app_iter: yield i ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/test_hacking.py0000664000175000017500000001011300000000000020462 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.hacking import checks from glance.tests import utils class HackingTestCase(utils.BaseTestCase): def test_assert_true_instance(self): self.assertEqual(1, len(list(checks.assert_true_instance( "self.assertTrue(isinstance(e, " "exception.BuildAbortException))")))) self.assertEqual( 0, len(list(checks.assert_true_instance("self.assertTrue()")))) def test_assert_equal_type(self): self.assertEqual(1, len(list(checks.assert_equal_type( "self.assertEqual(type(also['QuicAssist']), list)")))) self.assertEqual( 0, len(list(checks.assert_equal_type("self.assertTrue()")))) def test_assert_equal_none(self): self.assertEqual(1, len(list(checks.assert_equal_none( "self.assertEqual(A, None)")))) self.assertEqual(1, len(list(checks.assert_equal_none( "self.assertEqual(None, A)")))) self.assertEqual( 0, len(list(checks.assert_equal_none("self.assertIsNone()")))) def test_no_translate_debug_logs(self): self.assertEqual(1, len(list(checks.no_translate_debug_logs( "LOG.debug(_('foo'))", "glance/store/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.debug('foo')", "glance/store/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.info(_('foo'))", "glance/store/foo.py")))) def test_no_contextlib_nested(self): self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with contextlib.nested(")))) self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with nested(")))) self.assertEqual(0, len(list(checks.check_no_contextlib_nested( "with foo as bar")))) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_no_log_warn(self): code = """ LOG.warn("LOG.warn is deprecated") """ errors = [(1, 0, 'G330')] self._assert_has_errors(code, checks.no_log_warn, expected_errors=errors) code = """ LOG.warning("LOG.warn is deprecated") """ self._assert_has_no_errors(code, checks.no_log_warn) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9023082 glance-29.0.0/glance/tests/unit/0000775000175000017500000000000000000000000016430 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/__init__.py0000664000175000017500000000000000000000000020527 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9023082 glance-29.0.0/glance/tests/unit/api/0000775000175000017500000000000000000000000017201 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/api/__init__.py0000664000175000017500000000000000000000000021300 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9023082 glance-29.0.0/glance/tests/unit/api/middleware/0000775000175000017500000000000000000000000021316 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/api/middleware/__init__.py0000664000175000017500000000000000000000000023415 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/api/middleware/test_cache_manage.py0000664000175000017500000001436600000000000025314 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
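A minimal sketch of the middleware contract that TestCacheManageFilter below depends on: process_request() returns None to let a request fall through to the wrapped application, or returns a resource to short-circuit it. The class and route here are illustrative stand-ins, not glance's actual CacheManageFilter.

import webob
import webob.dec


class ShortCircuitFilter(object):
    def __init__(self, app):
        self.app = app

    @webob.dec.wsgify
    def __call__(self, req):
        resp = self.process_request(req)
        # None means "not handled here": fall through to the app.
        return resp if resp is not None else self.app

    def process_request(self, req):
        if req.path_info.startswith('/v2/cached_images'):
            return webob.Response(content_type='application/json',
                                  json_body=[])
        return None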
from glance.api.middleware import cache_manage from glance.api.v2 import cached_images import glance.common.config import glance.common.wsgi import glance.image_cache from glance.tests import utils as test_utils from unittest import mock import webob class TestCacheManageFilter(test_utils.BaseTestCase): @mock.patch.object(glance.image_cache.ImageCache, "init_driver") def setUp(self, mock_init_driver): super(TestCacheManageFilter, self).setUp() self.stub_application_name = "stubApplication" self.stub_value = "Stub value" self.image_id = "image_id_stub" mock_init_driver.return_value = None self.cache_manage_filter = cache_manage.CacheManageFilter( self.stub_application_name) def test_bogus_request(self): # prepare bogus_request = webob.Request.blank("/bogus/") # call resource = self.cache_manage_filter.process_request(bogus_request) # check self.assertIsNone(resource) @mock.patch.object(cached_images.CacheController, "get_cached_images") def test_get_cached_images(self, mock_get_cached_images): # setup mock_get_cached_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v2/cached_images") # call resource = self.cache_manage_filter.process_request(request) # check mock_get_cached_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.CacheController, "delete_cached_image") def test_delete_cached_image(self, mock_delete_cached_image): # setup mock_delete_cached_image.return_value = self.stub_value # prepare request = webob.Request.blank("/v2/cached_images/" + self.image_id, environ={'REQUEST_METHOD': "DELETE"}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_cached_image.assert_called_with(request, image_id=self.image_id) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.CacheController, "delete_cached_images") def test_delete_cached_images(self, mock_delete_cached_images): # setup mock_delete_cached_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v2/cached_images", environ={'REQUEST_METHOD': "DELETE"}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_cached_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.CacheController, "queue_image") def test_put_queued_image(self, mock_queue_image): # setup mock_queue_image.return_value = self.stub_value # prepare request = webob.Request.blank("/v2/queued_images/" + self.image_id, environ={'REQUEST_METHOD': "PUT"}) # call resource = self.cache_manage_filter.process_request(request) # check mock_queue_image.assert_called_with(request, image_id=self.image_id) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.CacheController, "get_queued_images") def test_get_queued_images(self, mock_get_queued_images): # setup mock_get_queued_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v2/queued_images") # call resource = self.cache_manage_filter.process_request(request) # check mock_get_queued_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.CacheController, "delete_queued_image") def test_delete_queued_image(self, mock_delete_queued_image): # setup mock_delete_queued_image.return_value = self.stub_value # 
prepare request = webob.Request.blank("/v2/queued_images/" + self.image_id, environ={'REQUEST_METHOD': 'DELETE'}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_queued_image.assert_called_with(request, image_id=self.image_id) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.CacheController, "delete_queued_images") def test_delete_queued_images(self, mock_delete_queued_images): # setup mock_delete_queued_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v2/queued_images", environ={'REQUEST_METHOD': 'DELETE'}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_queued_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/api/test_cmd.py0000664000175000017500000002175000000000000021362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import sys from unittest import mock import glance_store as store from oslo_config import cfg from oslo_log import log as logging import glance.cmd.api import glance.cmd.cache_cleaner import glance.cmd.cache_pruner import glance.common.config from glance.common import exception as exc import glance.common.wsgi import glance.image_cache.cleaner from glance.image_cache import prefetcher import glance.image_cache.pruner from glance.tests import utils as test_utils CONF = cfg.CONF class TestGlanceApiCmd(test_utils.BaseTestCase): __argv_backup = None def _do_nothing(self, *args, **kwargs): pass def _raise(self, exc): def fake(*args, **kwargs): raise exc return fake def setUp(self): super(TestGlanceApiCmd, self).setUp() self.__argv_backup = sys.argv sys.argv = ['glance-api'] self.stderr = io.StringIO() sys.stderr = self.stderr store.register_opts(CONF) self.mock_object(glance.common.config, 'load_paste_app', self._do_nothing) self.mock_object(glance.common.wsgi.Server, 'start', self._do_nothing) self.mock_object(glance.common.wsgi.Server, 'wait', self._do_nothing) def tearDown(self): sys.stderr = sys.__stderr__ sys.argv = self.__argv_backup super(TestGlanceApiCmd, self).tearDown() @mock.patch('glance.async_.set_threadpool_model',) @mock.patch.object(prefetcher, 'Prefetcher') def test_supported_default_store(self, mock_prefetcher, mock_set_model): self.config(group='glance_store', default_store='file') glance.cmd.api.main() # Make sure we declared the system threadpool model as eventlet mock_set_model.assert_called_once_with('eventlet') @mock.patch.object(prefetcher, 'Prefetcher') @mock.patch('glance.async_.set_threadpool_model', new=mock.MagicMock()) def test_worker_creation_failure(self, mock_prefetcher): failure = exc.WorkerCreationFailure(reason='test') self.mock_object(glance.common.wsgi.Server, 'start', self._raise(failure)) exit = self.assertRaises(SystemExit, glance.cmd.api.main) 
self.assertEqual(2, exit.code) @mock.patch('glance.async_.set_threadpool_model', new=mock.MagicMock()) def test_cleaner_store_config_assertion(self): failure = exc.GlanceException('This is what happens with http://') self.config(node_staging_uri='http://good.luck') self.mock_object(glance.common.wsgi.Server, 'start', self._raise(failure)) # Make sure that a failure to run the wsgi.Server will call our # clean print-and-abort handler. exit = self.assertRaises(SystemExit, glance.cmd.api.main) self.assertEqual(99, exit.code) @mock.patch.object(glance.common.config, 'parse_cache_args') @mock.patch.object(logging, 'setup') @mock.patch.object(glance.image_cache.ImageCache, 'init_driver') @mock.patch.object(glance.image_cache.ImageCache, 'clean') def test_cache_cleaner_main(self, mock_cache_clean, mock_cache_init_driver, mock_log_setup, mock_parse_config): mock_cache_init_driver.return_value = None manager = mock.MagicMock() manager.attach_mock(mock_log_setup, 'mock_log_setup') manager.attach_mock(mock_parse_config, 'mock_parse_config') manager.attach_mock(mock_cache_init_driver, 'mock_cache_init_driver') manager.attach_mock(mock_cache_clean, 'mock_cache_clean') glance.cmd.cache_cleaner.main() expected_call_sequence = [mock.call.mock_parse_config(), mock.call.mock_log_setup(CONF, 'glance'), mock.call.mock_cache_init_driver(), mock.call.mock_cache_clean()] self.assertEqual(expected_call_sequence, manager.mock_calls) @mock.patch.object(glance.image_cache.base.CacheApp, '__init__') def test_cache_cleaner_main_runtime_exception_handling(self, mock_cache): mock_cache.return_value = None self.mock_object(glance.image_cache.cleaner.Cleaner, 'run', self._raise(RuntimeError)) exit = self.assertRaises(SystemExit, glance.cmd.cache_cleaner.main) self.assertEqual('ERROR: ', exit.code) @mock.patch.object(glance.common.config, 'parse_cache_args') @mock.patch.object(logging, 'setup') @mock.patch.object(glance.image_cache.ImageCache, 'init_driver') @mock.patch.object(glance.image_cache.ImageCache, 'prune') def test_cache_pruner_main(self, mock_cache_prune, mock_cache_init_driver, mock_log_setup, mock_parse_config): mock_cache_init_driver.return_value = None manager = mock.MagicMock() manager.attach_mock(mock_log_setup, 'mock_log_setup') manager.attach_mock(mock_parse_config, 'mock_parse_config') manager.attach_mock(mock_cache_init_driver, 'mock_cache_init_driver') manager.attach_mock(mock_cache_prune, 'mock_cache_prune') glance.cmd.cache_pruner.main() expected_call_sequence = [mock.call.mock_parse_config(), mock.call.mock_log_setup(CONF, 'glance'), mock.call.mock_cache_init_driver(), mock.call.mock_cache_prune()] self.assertEqual(expected_call_sequence, manager.mock_calls) @mock.patch.object(glance.image_cache.base.CacheApp, '__init__') def test_cache_pruner_main_runtime_exception_handling(self, mock_cache): mock_cache.return_value = None self.mock_object(glance.image_cache.pruner.Pruner, 'run', self._raise(RuntimeError)) exit = self.assertRaises(SystemExit, glance.cmd.cache_pruner.main) self.assertEqual('ERROR: ', exit.code) def test_fail_with_value_error(self): with mock.patch('sys.stderr.write') as mock_stderr: with mock.patch('sys.exit') as mock_exit: exc_msg = 'A ValueError, LOL!' exc = ValueError(exc_msg) glance.cmd.api.fail(exc) mock_stderr.assert_called_once_with('ERROR: %s\n' % exc_msg) mock_exit.assert_called_once_with(4) def test_fail_with_config_exception(self): with mock.patch('sys.stderr.write') as mock_stderr: with mock.patch('sys.exit') as mock_exit: exc_msg = 'A ConfigError by George!' 
exc = cfg.ConfigFileValueError(exc_msg) glance.cmd.api.fail(exc) mock_stderr.assert_called_once_with('ERROR: %s\n' % exc_msg) mock_exit.assert_called_once_with(5) def test_fail_with_unknown_exception(self): with mock.patch('sys.stderr.write') as mock_stderr: with mock.patch('sys.exit') as mock_exit: exc_msg = 'A Crazy Unknown Error.' exc = CrayCray(exc_msg) glance.cmd.api.fail(exc) mock_stderr.assert_called_once_with('ERROR: %s\n' % exc_msg) mock_exit.assert_called_once_with(99) def test_main_with_store_config_exception(self): with mock.patch.object(glance.common.config, 'parse_args') as mock_config: with mock.patch('sys.exit') as mock_exit: exc = store.exceptions.BadStoreConfiguration() mock_config.side_effect = exc glance.cmd.api.main() mock_exit.assert_called_once_with(3) def test_main_with_runtime_error(self): with mock.patch.object(glance.common.config, 'parse_args') as mock_config: with mock.patch('sys.exit') as mock_exit: exc = RuntimeError() mock_config.side_effect = exc glance.cmd.api.main() mock_exit.assert_called_once_with(1) def test_main_with_worker_creation_failure(self): with mock.patch.object(glance.common.config, 'parse_args') as mock_config: with mock.patch('sys.exit') as mock_exit: exx = exc.WorkerCreationFailure() mock_config.side_effect = exx glance.cmd.api.main() mock_exit.assert_called_once_with(2) class CrayCray(Exception): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/api/test_common.py0000664000175000017500000001521000000000000022101 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
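A minimal sketch of the behavior TestSizeCheckedIter below verifies: wrap an image data iterator, count the bytes streamed through it, and fail once the total is known to diverge from the expected size. This illustrates the technique only; it is not glance.api.common.size_checked_iter itself.

class SizeMismatchError(Exception):
    pass


def size_checked(chunks, expected_size):
    seen = 0
    for chunk in chunks:
        seen += len(chunk)
        # Keep streaming; the client may already hold earlier chunks.
        yield chunk
    if seen != expected_size:
        # Only after the source is exhausted is the real total known.
        raise SizeMismatchError(
            'streamed %d bytes, expected %d' % (seen, expected_size))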
from unittest import mock from oslo_log.fixture import logging_error as log_fixture import testtools import webob import glance.api.common from glance.common import exception from glance.tests.unit import fixtures as glance_fixtures class SimpleIterator(object): def __init__(self, file_object, chunk_size): self.file_object = file_object self.chunk_size = chunk_size def __iter__(self): def read_chunk(): return self.file_object.read(self.chunk_size) chunk = read_chunk() while chunk: yield chunk chunk = read_chunk() else: return class TestSizeCheckedIter(testtools.TestCase): def setUp(self): super().setUp() # Limit the amount of DeprecationWarning messages in the unit test logs self.useFixture(glance_fixtures.WarningsFixture()) # Make sure logging output is limited but still test debug formatting self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(glance_fixtures.StandardLogging()) def _get_image_metadata(self): return {'id': 'e31cb99c-fe89-49fb-9cc5-f5104fffa636'} def _get_webob_response(self): request = webob.Request.blank('/') response = webob.Response() response.request = request return response def test_uniform_chunk_size(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 4, ['AB', 'CD'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertRaises(StopIteration, next, checked_image) def test_small_last_chunk(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 3, ['AB', 'C'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('C', next(checked_image)) self.assertRaises(StopIteration, next, checked_image) def test_variable_chunk_size(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 6, ['AB', '', 'CDE', 'F'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('', next(checked_image)) self.assertEqual('CDE', next(checked_image)) self.assertEqual('F', next(checked_image)) self.assertRaises(StopIteration, next, checked_image) def test_too_many_chunks(self): """An image should be streamed regardless of expected_size""" resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 4, ['AB', 'CD', 'EF'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertEqual('EF', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) def test_too_few_chunks(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter(resp, meta, 6, ['AB', 'CD'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) def test_too_much_data(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter(resp, meta, 3, ['AB', 'CD'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) def test_too_little_data(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter(resp, meta, 6, ['AB', 'CD', 'E'], None)
self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertEqual('E', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) class TestThreadPool(testtools.TestCase): def setUp(self): super().setUp() # Limit the amount of DeprecationWarning messages in the unit test logs self.useFixture(glance_fixtures.WarningsFixture()) # Make sure logging output is limited but still test debug formatting self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(glance_fixtures.StandardLogging()) @mock.patch('glance.async_.get_threadpool_model') def test_get_thread_pool(self, mock_gtm): get_thread_pool = glance.api.common.get_thread_pool pool1 = get_thread_pool('pool1', size=123) get_thread_pool('pool2', size=456) pool1a = get_thread_pool('pool1') # Two calls for the same pool should return the exact same thing self.assertEqual(pool1, pool1a) # Only two calls to get new threadpools should have been made mock_gtm.return_value.assert_has_calls( [mock.call(123), mock.call(456)]) @mock.patch('glance.async_.get_threadpool_model') def test_get_thread_pool_log(self, mock_gtm): with mock.patch.object(glance.api.common, 'LOG') as mock_log: glance.api.common.get_thread_pool('test-pool') mock_log.debug.assert_called_once_with( 'Initializing named threadpool %r', 'test-pool') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/api/test_property_protections.py0000664000175000017500000003346200000000000025137 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
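A minimal sketch of the idea the proxies tested below implement: an image's extra properties are filtered against per-role rules before a caller can see them. The rules mapping used here is an illustrative stand-in for glance's property-protections.conf, not its real format.

def readable_properties(extra_properties, roles, read_rules):
    # read_rules maps a property name to the set of roles allowed
    # to read it; anything unlisted stays hidden from the caller.
    return {
        name: value for name, value in extra_properties.items()
        if any(role in read_rules.get(name, set()) for role in roles)
    }


# Example: a caller with 'spl_role' sees only the readable property.
assert readable_properties(
    {'spl_read_prop': 'r', 'forbidden': 'prop'},
    roles=['spl_role'],
    read_rules={'spl_read_prop': {'spl_role'}},
) == {'spl_read_prop': 'r'}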
from glance.api import policy from glance.api import property_protections from glance.common import exception from glance.common import property_utils import glance.domain from glance.tests import utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' class TestProtectedImageRepoProxy(utils.BaseTestCase): class ImageRepoStub(object): def __init__(self, fixtures): self.fixtures = fixtures def get(self, image_id): for f in self.fixtures: if f.image_id == image_id: return f else: raise ValueError(image_id) def list(self, *args, **kwargs): return self.fixtures def add(self, image): self.fixtures.append(image) def setUp(self): super(TestProtectedImageRepoProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) self.property_rules = property_utils.PropertyRules(self.policy) self.image_factory = glance.domain.ImageFactory() extra_props = {'spl_create_prop': 'c', 'spl_read_prop': 'r', 'spl_update_prop': 'u', 'spl_delete_prop': 'd', 'forbidden': 'prop'} extra_props_2 = {'spl_read_prop': 'r', 'forbidden': 'prop'} self.fixtures = [ self.image_factory.new_image(image_id='1', owner=TENANT1, extra_properties=extra_props), self.image_factory.new_image(owner=TENANT2, visibility='public'), self.image_factory.new_image(image_id='3', owner=TENANT1, extra_properties=extra_props_2), ] self.context = glance.context.RequestContext(roles=['spl_role']) image_repo = self.ImageRepoStub(self.fixtures) self.image_repo = property_protections.ProtectedImageRepoProxy( image_repo, self.context, self.property_rules) def test_get_image(self): image_id = '1' result_image = self.image_repo.get(image_id) result_extra_props = result_image.extra_properties self.assertEqual('c', result_extra_props['spl_create_prop']) self.assertEqual('r', result_extra_props['spl_read_prop']) self.assertEqual('u', result_extra_props['spl_update_prop']) self.assertEqual('d', result_extra_props['spl_delete_prop']) self.assertNotIn('forbidden', result_extra_props.keys()) def test_list_image(self): result_images = self.image_repo.list() self.assertEqual(3, len(result_images)) result_extra_props = result_images[0].extra_properties self.assertEqual('c', result_extra_props['spl_create_prop']) self.assertEqual('r', result_extra_props['spl_read_prop']) self.assertEqual('u', result_extra_props['spl_update_prop']) self.assertEqual('d', result_extra_props['spl_delete_prop']) self.assertNotIn('forbidden', result_extra_props.keys()) result_extra_props = result_images[1].extra_properties self.assertEqual({}, result_extra_props) result_extra_props = result_images[2].extra_properties self.assertEqual('r', result_extra_props['spl_read_prop']) self.assertNotIn('forbidden', result_extra_props.keys()) class TestProtectedImageProxy(utils.BaseTestCase): def setUp(self): super(TestProtectedImageProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) self.property_rules = property_utils.PropertyRules(self.policy) class ImageStub(object): def __init__(self, extra_prop): self.extra_properties = extra_prop def test_read_image_with_extra_prop(self): context = glance.context.RequestContext(roles=['spl_role']) extra_prop = {'spl_read_prop': 'read', 'spl_fake_prop': 'prop'} image = self.ImageStub(extra_prop) result_image = property_protections.ProtectedImageProxy( image, context, self.property_rules) result_extra_props = result_image.extra_properties self.assertEqual('read', 
result_extra_props['spl_read_prop']) self.assertNotIn('spl_fake_prop', result_extra_props.keys()) class TestExtraPropertiesProxy(utils.BaseTestCase): def setUp(self): super(TestExtraPropertiesProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer(suppress_deprecation_warnings=True) self.property_rules = property_utils.PropertyRules(self.policy) def test_read_extra_property_as_admin_role(self): extra_properties = {'foo': 'bar', 'ping': 'pong'} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) test_result = extra_prop_proxy['foo'] self.assertEqual('bar', test_result) def test_read_extra_property_as_unpermitted_role(self): extra_properties = {'foo': 'bar', 'ping': 'pong'} context = glance.context.RequestContext(roles=['unpermitted_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(KeyError, extra_prop_proxy.__getitem__, 'foo') def test_update_extra_property_as_permitted_role_after_read(self): extra_properties = {'foo': 'bar', 'ping': 'pong'} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) extra_prop_proxy['foo'] = 'par' self.assertEqual('par', extra_prop_proxy['foo']) def test_update_extra_property_as_unpermitted_role_after_read(self): extra_properties = {'spl_read_prop': 'bar'} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(exception.ReservedProperty, extra_prop_proxy.__setitem__, 'spl_read_prop', 'par') def test_update_reserved_extra_property(self): extra_properties = {'spl_create_prop': 'bar'} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(exception.ReservedProperty, extra_prop_proxy.__setitem__, 'spl_create_prop', 'par') def test_update_empty_extra_property(self): extra_properties = {'foo': ''} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) extra_prop_proxy['foo'] = 'bar' self.assertEqual('bar', extra_prop_proxy['foo']) def test_create_extra_property_admin(self): extra_properties = {} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) extra_prop_proxy['boo'] = 'doo' self.assertEqual('doo', extra_prop_proxy['boo']) def test_create_reserved_extra_property(self): extra_properties = {} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(exception.ReservedProperty, extra_prop_proxy.__setitem__, 'boo', 'doo') def test_delete_extra_property_as_admin_role(self): extra_properties = {'foo': 'bar'} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) del extra_prop_proxy['foo'] self.assertRaises(KeyError, extra_prop_proxy.__getitem__, 'foo') def test_delete_nonexistant_extra_property_as_admin_role(self): 
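        # Deleting a property that was never set raises KeyError even for
        # an admin role; permission checks never mask a missing key.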
        extra_properties = {}
        context = glance.context.RequestContext(roles=['admin'])
        extra_prop_proxy = property_protections.ExtraPropertiesProxy(
            context, extra_properties, self.property_rules)
        self.assertRaises(KeyError, extra_prop_proxy.__delitem__, 'foo')

    def test_delete_reserved_extra_property(self):
        extra_properties = {'spl_read_prop': 'r'}
        context = glance.context.RequestContext(roles=['spl_role'])
        extra_prop_proxy = property_protections.ExtraPropertiesProxy(
            context, extra_properties, self.property_rules)
        # Ensure property has been created and can be read
        self.assertEqual('r', extra_prop_proxy['spl_read_prop'])
        self.assertRaises(exception.ReservedProperty,
                          extra_prop_proxy.__delitem__, 'spl_read_prop')

    def test_delete_nonexistant_extra_property(self):
        extra_properties = {}
        context = glance.context.RequestContext(roles=['spl_role'])
        extra_prop_proxy = property_protections.ExtraPropertiesProxy(
            context, extra_properties, self.property_rules)
        self.assertRaises(KeyError,
                          extra_prop_proxy.__delitem__, 'spl_read_prop')

    def test_delete_empty_extra_property(self):
        extra_properties = {'foo': ''}
        context = glance.context.RequestContext(roles=['admin'])
        extra_prop_proxy = property_protections.ExtraPropertiesProxy(
            context, extra_properties, self.property_rules)
        del extra_prop_proxy['foo']
        self.assertNotIn('foo', extra_prop_proxy)


class TestProtectedImageFactoryProxy(utils.BaseTestCase):

    def setUp(self):
        super(TestProtectedImageFactoryProxy, self).setUp()
        self.set_property_protections()
        self.policy = policy.Enforcer(suppress_deprecation_warnings=True)
        self.property_rules = property_utils.PropertyRules(self.policy)
        self.factory = glance.domain.ImageFactory()

    def test_create_image_no_extra_prop(self):
        self.context = glance.context.RequestContext(tenant=TENANT1,
                                                     roles=['spl_role'])
        self.image_factory = property_protections.ProtectedImageFactoryProxy(
            self.factory, self.context, self.property_rules)
        extra_props = {}
        image = self.image_factory.new_image(extra_properties=extra_props)
        expected_extra_props = {}
        self.assertEqual(expected_extra_props, image.extra_properties)

    def test_create_image_extra_prop(self):
        self.context = glance.context.RequestContext(tenant=TENANT1,
                                                     roles=['spl_role'])
        self.image_factory = property_protections.ProtectedImageFactoryProxy(
            self.factory, self.context, self.property_rules)
        extra_props = {'spl_create_prop': 'c'}
        image = self.image_factory.new_image(extra_properties=extra_props)
        expected_extra_props = {'spl_create_prop': 'c'}
        self.assertEqual(expected_extra_props, image.extra_properties)

    def test_create_image_extra_prop_reserved_property(self):
        self.context = glance.context.RequestContext(tenant=TENANT1,
                                                     roles=['spl_role'])
        self.image_factory = property_protections.ProtectedImageFactoryProxy(
            self.factory, self.context, self.property_rules)
        extra_props = {'foo': 'bar', 'spl_create_prop': 'c'}
        # no reg ex for property 'foo' is mentioned for spl_role in config
        self.assertRaises(exception.ReservedProperty,
                          self.image_factory.new_image,
                          extra_properties=extra_props)

    def test_create_image_extra_prop_admin(self):
        self.context = glance.context.RequestContext(tenant=TENANT1,
                                                     roles=['admin'])
        self.image_factory = property_protections.ProtectedImageFactoryProxy(
            self.factory, self.context, self.property_rules)
        extra_props = {'foo': 'bar', 'spl_create_prop': 'c'}
        image = self.image_factory.new_image(extra_properties=extra_props)
        expected_extra_props = {'foo': 'bar', 'spl_create_prop': 'c'}
        self.assertEqual(expected_extra_props, image.extra_properties)

    def test_create_image_extra_prop_invalid_role(self):
        self.context =
glance.context.RequestContext(tenant=TENANT1, roles=['imaginary-role']) self.image_factory = property_protections.ProtectedImageFactoryProxy( self.factory, self.context, self.property_rules) extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} self.assertRaises(exception.ReservedProperty, self.image_factory.new_image, extra_properties=extra_props) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9063087 glance-29.0.0/glance/tests/unit/async_/0000775000175000017500000000000000000000000017704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/__init__.py0000664000175000017500000000000000000000000022003 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9063087 glance-29.0.0/glance/tests/unit/async_/flows/0000775000175000017500000000000000000000000021036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/__init__.py0000664000175000017500000000000000000000000023135 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9063087 glance-29.0.0/glance/tests/unit/async_/flows/plugins/0000775000175000017500000000000000000000000022517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/plugins/__init__.py0000664000175000017500000000000000000000000024616 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/plugins/test_image_conversion.py0000664000175000017500000006107400000000000027467 0ustar00zuulzuul00000000000000# Copyright 2018 RedHat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
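# A minimal sketch of the inspect-then-convert pattern exercised by the
# tests below (illustrative only; glance's _ConvertImage task does this via
# oslo's processutils with prlimits and a format-inspector safety check).
# The key point the tests assert: the source format is always passed
# explicitly with -f so qemu-img never probes it, and the JSON report is
# vetted for dangerous settings before any conversion runs. The function
# name here is hypothetical.
import json
import subprocess


def inspect_then_convert(src_path, src_format, dst_path):
    # Inspect using the caller-supplied format; never let qemu-img guess.
    info = json.loads(subprocess.run(
        ['qemu-img', 'info', '-f', src_format, '--output=json', src_path],
        check=True, capture_output=True, text=True).stdout)
    if info.get('format') != src_format:
        raise RuntimeError('Image metadata disagrees about format')
    if info.get('backing-filename'):
        raise RuntimeError('QCOW images with backing files are not allowed')
    # Conversion also pins the source format with -f.
    subprocess.run(
        ['qemu-img', 'convert', '-f', src_format, '-O', 'qcow2',
         src_path, dst_path], check=True)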
import fixtures
import json
import os
from unittest import mock

import glance_store
from oslo_concurrency import processutils
from oslo_config import cfg

import glance.async_.flows.api_image_import as import_flow
import glance.async_.flows.plugins.image_conversion as image_conversion
from glance.async_ import utils as async_utils
from glance.common import format_inspector
from glance.common import utils
from glance import domain
from glance import gateway
import glance.tests.utils as test_utils

CONF = cfg.CONF

UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestConvertImageTask(test_utils.BaseTestCase):

    def setUp(self):
        super(TestConvertImageTask, self).setUp()

        glance_store.register_opts(CONF)
        self.config(default_store='file',
                    stores=['file', 'http'],
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")
        self.config(output_format='qcow2',
                    group='image_conversion')
        glance_store.create_stores(CONF)

        self.work_dir = os.path.join(self.test_dir, 'work_dir')
        utils.safe_mkdirs(self.work_dir)
        self.config(work_dir=self.work_dir, group='task')

        self.context = mock.MagicMock()
        self.img_repo = mock.MagicMock()
        self.task_repo = mock.MagicMock()
        self.stores = mock.MagicMock()
        self.image_id = UUID1

        self.gateway = gateway.Gateway()
        self.task_factory = domain.TaskFactory()
        self.img_factory = self.gateway.get_image_factory(self.context)
        self.image = self.img_factory.new_image(image_id=self.image_id,
                                                disk_format='raw',
                                                container_format='bare')

        task_input = {
            "import_from": "http://cloud.foo/image.raw",
            "import_from_format": "raw",
            "image_properties": {'disk_format': 'raw',
                                 'container_format': 'bare'}
        }
        task_ttl = CONF.task.task_time_to_live

        self.task_type = 'import'
        request_id = 'fake_request_id'
        user_id = 'fake_user'
        self.task = self.task_factory.new_task(self.task_type, TENANT1,
                                               self.image_id, user_id,
                                               request_id,
                                               task_time_to_live=task_ttl,
                                               task_input=task_input)

        self.image.extra_properties = {
            'os_glance_import_task': self.task.task_id,
            'os_glance_importing_to_stores': mock.MagicMock(),
            'os_glance_failed_import': ""
        }
        self.wrapper = import_flow.ImportActionWrapper(self.img_repo,
                                                       self.image_id,
                                                       self.task.task_id)
        self.detect_file_format_mock = mock.MagicMock()
        self.useFixture(fixtures.MockPatch('glance.common.format_inspector.'
                                           'detect_file_format',
                                           self.detect_file_format_mock))

    @mock.patch.object(os, 'stat')
    @mock.patch.object(os, 'remove')
    def test_image_convert_success(self, mock_os_remove, mock_os_stat):
        mock_os_remove.return_value = None
        mock_os_stat.return_value.st_size = 123
        image_convert = image_conversion._ConvertImage(self.context,
                                                       self.task.task_id,
                                                       self.task_type,
                                                       self.wrapper,
                                                       self.stores)

        self.task_repo.get.return_value = self.task
        image = mock.MagicMock(image_id=self.image_id, virtual_size=None,
                               extra_properties={
                                   'os_glance_import_task':
                                       self.task.task_id},
                               disk_format='raw')
        self.img_repo.get.return_value = image

        with mock.patch.object(processutils, 'execute') as exc_mock:
            exc_mock.return_value = ("", None)
            with mock.patch.object(json, 'loads') as jloads_mock:
                jloads_mock.return_value = {'format': 'raw',
                                            'virtual-size': 456}
                inspector = self.detect_file_format_mock.return_value
                inspector.__str__.return_value = 'raw'
                inspector.safety_check.return_value = True
                image_convert.execute('file:///test/path.raw')

                # NOTE(hemanthm): Asserting that the source format is passed
                # to qemu-utils to avoid inferring the image format. This
                # shields us from an attack vector described at
                # https://bugs.launchpad.net/glance/+bug/1449062/comments/72
                self.assertIn('-f', exc_mock.call_args[0])

        self.assertEqual('bare', image.container_format)
        self.assertEqual('qcow2', image.disk_format)
        self.assertEqual(456, image.virtual_size)
        self.assertEqual(123, image.size)

    @mock.patch('os.remove')
    @mock.patch('os.stat')
    @mock.patch('oslo_concurrency.processutils.trycmd')
    @mock.patch('glance.async_.flows.plugins.image_conversion.LOG')
    def test_image_convert_excluded_for_iso(self, mock_log, mock_run,
                                            mock_stat, mock_remove,
                                            fmt='iso'):
        mock_stat.return_value = mock.MagicMock(st_size=123)
        stdout = json.dumps({'format': 'raw'})
        mock_run.return_value = (stdout, '')
        image_convert = image_conversion._ConvertImage(self.context,
                                                       self.task.task_id,
                                                       self.task_type,
                                                       self.wrapper,
                                                       self.stores)

        self.task_repo.get.return_value = self.task
        image = mock.MagicMock(image_id=self.image_id, virtual_size=None,
                               extra_properties={
                                   'os_glance_import_task':
                                       self.task.task_id},
                               disk_format=fmt)
        self.img_repo.get.return_value = image

        inspector = self.detect_file_format_mock.return_value
        inspector.__str__.return_value = 'iso'
        inspector.safety_check.return_value = True
        image_convert.execute('file:///test/path.iso')

        self.assertEqual(fmt, image.disk_format)
        mock_log.debug.assert_called_once_with(
            "Avoiding conversion of an image %s having "
            "`iso` disk format.", self.image_id)

    def test_image_convert_iso_lie_raw(self):
        e = self.assertRaises(
            RuntimeError,
            self.test_image_convert_excluded_for_iso, fmt='raw')
        self.assertEqual('Image has disallowed configuration', str(e))

    def test_image_convert_iso_lie_qcow2(self):
        e = self.assertRaises(
            RuntimeError,
            self.test_image_convert_excluded_for_iso, fmt='qcow2')
        self.assertEqual('Image has disallowed configuration', str(e))

    def _setup_image_convert_info_fail(self, disk_format='qcow2'):
        image_convert = image_conversion._ConvertImage(self.context,
                                                       self.task.task_id,
                                                       self.task_type,
                                                       self.wrapper,
                                                       self.stores)

        self.task_repo.get.return_value = self.task
        image = mock.MagicMock(image_id=self.image_id, virtual_size=None,
                               extra_properties={
                                   'os_glance_import_task':
                                       self.task.task_id},
                               disk_format=disk_format)
        self.img_repo.get.return_value = image
        return image_convert

    def test_image_convert_fails_inspection(self):
        convert = self._setup_image_convert_info_fail()
        with mock.patch.object(processutils, 'execute') as exc_mock:
            inspector = self.detect_file_format_mock.return_value
            inspector.__str__.return_value = 'qcow2'
            inspector.safety_check.return_value = True
            exc_mock.side_effect = OSError('fail')
            self.assertRaises(OSError, convert.execute,
                              'file:///test/path.raw')
            exc_mock.assert_called_once_with(
                'qemu-img', 'info', '-f', 'qcow2', '--output=json',
                '/test/path.raw',
                prlimit=async_utils.QEMU_IMG_PROC_LIMITS,
                python_exec=convert.python,
                log_errors=processutils.LOG_ALL_ERRORS)
        # Make sure we did not update the image
        self.img_repo.save.assert_not_called()

    def test_image_convert_inspection_reports_error(self):
        convert = self._setup_image_convert_info_fail()
        with mock.patch.object(processutils, 'execute') as exc_mock:
            exc_mock.return_value = '', 'some error'
            inspector = self.detect_file_format_mock.return_value
            inspector.__str__.return_value = 'qcow2'
            inspector.safety_check.return_value = True
            self.assertRaises(RuntimeError, convert.execute,
                              'file:///test/path.raw')
            exc_mock.assert_called_once_with(
                'qemu-img', 'info', '-f', 'qcow2', '--output=json',
                '/test/path.raw',
prlimit=async_utils.QEMU_IMG_PROC_LIMITS, python_exec=convert.python, log_errors=processutils.LOG_ALL_ERRORS) # Make sure we did not update the image self.img_repo.save.assert_not_called() def test_image_convert_invalid_qcow(self): data = {'format': 'qcow2', 'backing-filename': '/etc/hosts'} convert = self._setup_image_convert_info_fail() with mock.patch.object(processutils, 'execute') as exc_mock: inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'qcow2' inspector.safety_check.return_value = True exc_mock.return_value = json.dumps(data), '' e = self.assertRaises(RuntimeError, convert.execute, 'file:///test/path.qcow') self.assertEqual('QCOW images with backing files are not allowed', str(e)) def test_image_convert_invalid_qcow_data_file(self): data = {'format': 'qcow2', 'format-specific': { 'data': { 'data-file': '/etc/hosts', }, }} convert = self._setup_image_convert_info_fail() with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = json.dumps(data), '' inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'qcow2' inspector.safety_check.return_value = True e = self.assertRaises(RuntimeError, convert.execute, 'file:///test/path.qcow') self.assertEqual('QCOW images with data-file set are not allowed', str(e)) def test_image_convert_no_inspector_match(self): convert = self._setup_image_convert_info_fail() self.assertRaisesRegex(RuntimeError, 'Image format mismatch', convert.execute, 'file:///test/path.hpfs') def test_image_convert_fails_inspection_safety_check(self): convert = self._setup_image_convert_info_fail() inspector = self.detect_file_format_mock.return_value inspector.safety_check.return_value = False self.assertRaisesRegex(RuntimeError, 'Image has disallowed configuration', convert.execute, 'file:///test/path.qcow') def test_image_convert_fails_inspection_format_check(self): convert = self._setup_image_convert_info_fail() self.detect_file_format_mock.side_effect = ( format_inspector.ImageFormatError()) self.assertRaisesRegex(RuntimeError, 'Image format detection failed', convert.execute, 'file:///test/path.qcow') def test_image_convert_fails_inspection_error(self): convert = self._setup_image_convert_info_fail() self.detect_file_format_mock.side_effect = ValueError self.assertRaisesRegex(RuntimeError, 'Unable to inspect image', convert.execute, 'file:///test/path.qcow') def _test_image_convert_invalid_vmdk(self): data = {'format': 'vmdk', 'format-specific': { 'data': { 'create-type': 'monolithicFlat', }}} convert = self._setup_image_convert_info_fail(disk_format='vmdk') with mock.patch.object(processutils, 'execute') as exc_mock: inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'vmdk' inspector.safety_check.return_value = True exc_mock.return_value = json.dumps(data), '' convert.execute('file:///test/path.vmdk') def test_image_convert_invalid_vmdk(self): e = self.assertRaises(RuntimeError, self._test_image_convert_invalid_vmdk) self.assertEqual('Invalid VMDK create-type specified', str(e)) def test_image_convert_valid_vmdk_no_types(self): with mock.patch.object(CONF.image_format, 'vmdk_allowed_types', new=[]): # We make it past the VMDK check and fail because our file # does not exist e = self.assertRaises(RuntimeError, self._test_image_convert_invalid_vmdk) self.assertEqual('Image is a VMDK, but no VMDK createType is ' 'specified', str(e)) def test_image_convert_valid_vmdk(self): with mock.patch.object(CONF.image_format, 'vmdk_allowed_types', 
new=['monolithicSparse', 'monolithicFlat']): # We make it past the VMDK check and fail because our file # does not exist self.assertRaises(FileNotFoundError, self._test_image_convert_invalid_vmdk) def test_image_convert_fails(self): convert = self._setup_image_convert_info_fail(disk_format='raw') with mock.patch.object(processutils, 'execute') as exc_mock: inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'raw' inspector.safety_check.return_value = True exc_mock.side_effect = [('{"format":"raw"}', ''), OSError('convert_fail')] self.assertRaises(OSError, convert.execute, 'file:///test/path.raw') exc_mock.assert_has_calls( [mock.call('qemu-img', 'info', '-f', 'raw', '--output=json', '/test/path.raw', prlimit=async_utils.QEMU_IMG_PROC_LIMITS, python_exec=convert.python, log_errors=processutils.LOG_ALL_ERRORS), mock.call('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', '/test/path.raw', '/test/path.raw.qcow2', log_errors=processutils.LOG_ALL_ERRORS)]) # Make sure we did not update the image self.img_repo.save.assert_not_called() def test_image_convert_reports_fail(self): convert = self._setup_image_convert_info_fail(disk_format='raw') with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.side_effect = [('{"format":"raw"}', ''), ('', 'some error')] inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'raw' inspector.safety_check.return_value = True self.assertRaises(RuntimeError, convert.execute, 'file:///test/path.raw') exc_mock.assert_has_calls( [mock.call('qemu-img', 'info', '-f', 'raw', '--output=json', '/test/path.raw', prlimit=async_utils.QEMU_IMG_PROC_LIMITS, python_exec=convert.python, log_errors=processutils.LOG_ALL_ERRORS), mock.call('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', '/test/path.raw', '/test/path.raw.qcow2', log_errors=processutils.LOG_ALL_ERRORS)]) # Make sure we did not update the image self.img_repo.save.assert_not_called() def test_image_convert_fails_source_format(self): convert = self._setup_image_convert_info_fail() with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ('{}', '') inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'qcow2' inspector.safety_check.return_value = True exc = self.assertRaises(RuntimeError, convert.execute, 'file:///test/path.raw') self.assertIn('Image metadata disagrees about format', str(exc)) exc_mock.assert_called_once_with( 'qemu-img', 'info', '-f', 'qcow2', '--output=json', '/test/path.raw', prlimit=async_utils.QEMU_IMG_PROC_LIMITS, python_exec=convert.python, log_errors=processutils.LOG_ALL_ERRORS) # Make sure we did not update the image self.img_repo.save.assert_not_called() def test_image_convert_source_format_inspection_not_match(self): convert = self._setup_image_convert_info_fail(disk_format="raw") with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ( '{"format": "raw", "virtual-size": 123}', '') inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 'qcow2' exc = self.assertRaises(RuntimeError, convert.execute, 'file:///test/path.raw') self.assertIn('Image format mismatch', str(exc)) def test_image_convert_same_format_does_nothing(self): convert = self._setup_image_convert_info_fail() with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ( '{"format": "qcow2", "virtual-size": 123}', '') inspector = self.detect_file_format_mock.return_value inspector.__str__.return_value = 
'qcow2' inspector.safety_check.return_value = True convert.execute('file:///test/path.qcow') # Make sure we only called qemu-img for inspection, not conversion exc_mock.assert_called_once_with( 'qemu-img', 'info', '-f', 'qcow2', '--output=json', '/test/path.qcow', prlimit=async_utils.QEMU_IMG_PROC_LIMITS, python_exec=convert.python, log_errors=processutils.LOG_ALL_ERRORS) # Make sure we set the virtual_size before we exited image = self.img_repo.get.return_value self.assertEqual(123, image.virtual_size) def _set_image_conversion(self, mock_os_remove, stores=[]): mock_os_remove.return_value = None wrapper = mock.MagicMock() image_convert = image_conversion._ConvertImage(self.context, self.task.task_id, self.task_type, wrapper, stores) action = wrapper.__enter__.return_value self.task_repo.get.return_value = self.task return action, image_convert @mock.patch.object(os, 'remove') def test_image_convert_revert_success_multiple_stores( self, mock_os_remove): action, image_convert = self._set_image_conversion( mock_os_remove, stores=self.stores) with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ("", None) with mock.patch.object(os.path, 'exists') as os_exists_mock: os_exists_mock.return_value = True image_convert.revert(result=mock.MagicMock()) self.assertEqual(1, mock_os_remove.call_count) action.set_image_attribute.assert_called_once_with( status='queued') action.remove_importing_stores.assert_called_once_with( self.stores) action.add_failed_stores.assert_called_once_with( self.stores) @mock.patch.object(os, 'remove') def test_image_convert_revert_success_single_store( self, mock_os_remove): action, image_convert = self._set_image_conversion(mock_os_remove) with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ("", None) with mock.patch.object(os.path, 'exists') as os_exists_mock: os_exists_mock.return_value = True image_convert.revert(result=mock.MagicMock()) self.assertEqual(1, mock_os_remove.call_count) self.assertEqual(0, action.remove_importing_stores.call_count) self.assertEqual(0, action.add_failed_store.call_count) action.set_image_attribute.assert_called_once_with( status='queued') @mock.patch.object(os, 'remove') def test_image_convert_revert_success_src_file_exists( self, mock_os_remove): action, image_convert = self._set_image_conversion( mock_os_remove, stores=self.stores) image_convert.src_path = mock.MagicMock() with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ("", None) with mock.patch.object(os.path, 'exists') as os_exists_mock: os_exists_mock.return_value = True image_convert.revert(result=mock.MagicMock()) action.set_image_attribute.assert_called_once_with( status='queued') action.remove_importing_stores.assert_called_once_with( self.stores) action.add_failed_stores.assert_called_once_with( self.stores) self.assertEqual(2, mock_os_remove.call_count) def test_image_convert_interpreter_configured(self): # By default, wsgi.python_interpreter is None; if it is # overridden, we should take the interpreter from config. 
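        # The value ends up as convert.python and is handed to
        # processutils.execute() as python_exec (see the qemu-img calls
        # asserted above), so the prlimit wrapper that confines qemu-img
        # runs under the configured interpreter.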
fake_interpreter = '/usr/bin/python2.7' self.config(python_interpreter=fake_interpreter, group='wsgi') convert = image_conversion._ConvertImage(self.context, self.task.task_id, self.task_type, self.wrapper, self.stores) self.assertEqual(fake_interpreter, convert.python) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/plugins/test_inject_image_metadata.py0000664000175000017500000001157200000000000030414 0ustar00zuulzuul00000000000000# Copyright 2018 NTT DATA, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock import glance_store from oslo_config import cfg import glance.async_.flows.api_image_import as import_flow import glance.async_.flows.plugins.inject_image_metadata as inject_metadata from glance.common import utils from glance import domain from glance import gateway from glance.tests.unit import utils as test_unit_utils import glance.tests.utils as test_utils CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestInjectImageMetadataTask(test_utils.BaseTestCase): def setUp(self): super(TestInjectImageMetadataTask, self).setUp() glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores(CONF) self.work_dir = os.path.join(self.test_dir, 'work_dir') utils.safe_mkdirs(self.work_dir) self.config(work_dir=self.work_dir, group='task') self.context = mock.MagicMock() self.img_repo = mock.MagicMock() self.task_repo = mock.MagicMock() self.image_id = UUID1 self.gateway = gateway.Gateway() self.task_factory = domain.TaskFactory() self.img_factory = self.gateway.get_image_factory(self.context) self.image = self.img_factory.new_image(image_id=UUID1, disk_format='qcow2', container_format='bare', extra_properties={}) self.img_repo.get.return_value = self.image task_input = { "import_from": "http://cloud.foo/image.qcow2", "import_from_format": "qcow2", "image_properties": {'disk_format': 'qcow2', 'container_format': 'bare'} } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' request_id = 'fake_request_id' user_id = 'fake_user' self.task = self.task_factory.new_task(self.task_type, TENANT1, UUID1, user_id, request_id, task_time_to_live=task_ttl, task_input=task_input) self.image.extra_properties = { 'os_glance_import_task': self.task.task_id} self.img_repo.get.return_value = self.image self.wrapper = import_flow.ImportActionWrapper(self.img_repo, self.image_id, self.task.task_id) def test_inject_image_metadata_using_non_admin_user(self): context = test_unit_utils.get_fake_context(roles='member') inject_image_metadata = inject_metadata._InjectMetadataProperties( context, self.task.task_id, self.task_type, self.wrapper) self.config(inject={"test": "abc"}, group='inject_metadata_properties') inject_image_metadata.execute() 
self.img_repo.save.assert_called_once_with(self.image, 'queued') self.assertEqual({"test": "abc", "os_glance_import_task": self.task.task_id}, self.image.extra_properties) def test_inject_image_metadata_using_admin_user(self): context = test_unit_utils.get_fake_context(roles='admin') inject_image_metadata = inject_metadata._InjectMetadataProperties( context, self.task.task_id, self.task_type, self.wrapper) self.config(inject={"test": "abc"}, group='inject_metadata_properties') inject_image_metadata.execute() self.img_repo.save.assert_called_once_with(self.image, 'queued') def test_inject_image_metadata_empty(self): context = test_unit_utils.get_fake_context(roles='member') inject_image_metadata = inject_metadata._InjectMetadataProperties( context, self.task.task_id, self.task_type, self.wrapper) self.config(inject={}, group='inject_metadata_properties') inject_image_metadata.execute() self.img_repo.save.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_api_image_import.py0000664000175000017500000016331400000000000025764 0ustar00zuulzuul00000000000000# Copyright 2018 Verizon Wireless # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
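# A condensed sketch of the lock-and-save pattern that ImportActionWrapper
# (exercised at length below) implements. Illustrative only, and simplified
# from what these tests assert: the image is re-fetched on entry, the
# os_glance_import_task property must name the running task, and changes
# are persisted only on a clean exit. The class name is hypothetical.
class LockedImageSketch:
    def __init__(self, repo, image_id, task_id):
        self.repo = repo
        self.image_id = image_id
        self.task_id = task_id

    def __enter__(self):
        self.image = self.repo.get(self.image_id)
        holder = self.image.extra_properties.get('os_glance_import_task')
        if holder != self.task_id:
            # glance raises exception.TaskAbortedError here
            raise RuntimeError('image lock held by task %s' % holder)
        return self.image

    def __exit__(self, exc_type, exc, tb):
        if exc_type is None:
            # Persist mutations made under the lock; skipped on failure.
            self.repo.save(self.image, self.image.status)
        return False  # never swallow the caller's exception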
import sys
from unittest import mock
import urllib.error

from glance_store import exceptions as store_exceptions
from oslo_config import cfg
from oslo_utils import units
import taskflow

import glance.async_.flows.api_image_import as import_flow
from glance.common import exception
from glance.common.scripts.image_import import main as image_import
from glance import context
from glance.domain import ExtraProperties
from glance import gateway
from glance.i18n import _
import glance.tests.utils as test_utils

from cursive import exception as cursive_exception

CONF = cfg.CONF

TASK_TYPE = 'api_image_import'
TASK_ID1 = 'dbbe7231-020f-4311-87e1-5aaa6da56c02'
IMAGE_ID1 = '41f5b3b0-f54c-4cef-bd45-ce3e376a142f'
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestApiImageImportTask(test_utils.BaseTestCase):

    def setUp(self):
        super(TestApiImageImportTask, self).setUp()

        self.wd_task_input = {
            "import_req": {
                "method": {
                    "name": "web-download",
                    "uri": "http://example.com/image.browncow"
                }
            }
        }

        self.gd_task_input = {
            "import_req": {
                "method": {
                    "name": "glance-direct"
                }
            }
        }

        self.mock_task_repo = mock.MagicMock()
        self.mock_image_repo = mock.MagicMock()
        self.mock_image = self.mock_image_repo.get.return_value
        self.mock_image.extra_properties = {
            'os_glance_import_task': TASK_ID1,
            'os_glance_stage_host': 'http://glance2',
        }

    @mock.patch('glance.async_.flows.api_image_import._VerifyStaging.__init__')
    @mock.patch('taskflow.patterns.linear_flow.Flow.add')
    @mock.patch('taskflow.patterns.linear_flow.__init__')
    def _pass_uri(self, mock_lf_init, mock_flow_add, mock_VS_init,
                  uri, file_uri, import_req):
        flow_kwargs = {"task_id": TASK_ID1,
                       "task_type": TASK_TYPE,
                       "task_repo": self.mock_task_repo,
                       "image_repo": self.mock_image_repo,
                       "image_id": IMAGE_ID1,
                       "context": mock.MagicMock(),
                       "import_req": import_req}
        mock_lf_init.return_value = None
        mock_VS_init.return_value = None
        self.config(node_staging_uri=uri)
        import_flow.get_flow(**flow_kwargs)
        mock_VS_init.assert_called_with(TASK_ID1, TASK_TYPE,
                                        self.mock_task_repo,
                                        file_uri)

    def test_get_flow_handles_node_uri_with_ending_slash(self):
        test_uri = 'file:///some/where/'
        expected_uri = '{0}{1}'.format(test_uri, IMAGE_ID1)
        self._pass_uri(uri=test_uri, file_uri=expected_uri,
                       import_req=self.gd_task_input['import_req'])
        self._pass_uri(uri=test_uri, file_uri=expected_uri,
                       import_req=self.wd_task_input['import_req'])

    def test_get_flow_handles_node_uri_without_ending_slash(self):
        test_uri = 'file:///some/where'
        expected_uri = '{0}/{1}'.format(test_uri, IMAGE_ID1)
        self._pass_uri(uri=test_uri, file_uri=expected_uri,
                       import_req=self.wd_task_input['import_req'])
        self._pass_uri(uri=test_uri, file_uri=expected_uri,
                       import_req=self.gd_task_input['import_req'])

    def test_get_flow_pops_stage_host(self):
        import_flow.get_flow(task_id=TASK_ID1,
                             task_type=TASK_TYPE,
                             task_repo=self.mock_task_repo,
                             image_repo=self.mock_image_repo,
                             image_id=IMAGE_ID1,
                             context=mock.MagicMock(),
                             import_req=self.gd_task_input['import_req'])
        self.assertNotIn('os_glance_stage_host',
                         self.mock_image.extra_properties)
        self.assertIn('os_glance_import_task',
                      self.mock_image.extra_properties)

    def test_assert_quota_no_task(self):
        ignored = mock.MagicMock()
        task_repo = mock.MagicMock()
        task_repo.get.return_value = None
        task_id = 'some-task'
        enforce_fn = mock.MagicMock()
        enforce_fn.side_effect = exception.LimitExceeded
        with mock.patch.object(import_flow, 'LOG') as mock_log:
            self.assertRaises(exception.LimitExceeded,
                              import_flow.assert_quota,
                              ignored, task_repo, task_id,
                              [], ignored, enforce_fn)
task_repo.get.assert_called_once_with('some-task') # Make sure we logged instead of crashed if no task was found mock_log.error.assert_called_once_with('Failed to find task %r to ' 'update after quota failure', 'some-task') task_repo.save.assert_not_called() def test_assert_quota(self): ignored = mock.MagicMock() task_repo = mock.MagicMock() task_id = 'some-task' enforce_fn = mock.MagicMock() enforce_fn.side_effect = exception.LimitExceeded wrapper = mock.MagicMock() action = wrapper.__enter__.return_value action.image_status = 'importing' self.assertRaises(exception.LimitExceeded, import_flow.assert_quota, ignored, task_repo, task_id, ['store1'], wrapper, enforce_fn) action.remove_importing_stores.assert_called_once_with(['store1']) action.set_image_attribute.assert_called_once_with(status='queued') task_repo.get.assert_called_once_with('some-task') task_repo.save.assert_called_once_with(task_repo.get.return_value) def test_assert_quota_copy(self): ignored = mock.MagicMock() task_repo = mock.MagicMock() task_id = 'some-task' enforce_fn = mock.MagicMock() enforce_fn.side_effect = exception.LimitExceeded wrapper = mock.MagicMock() action = wrapper.__enter__.return_value action.image_status = 'active' self.assertRaises(exception.LimitExceeded, import_flow.assert_quota, ignored, task_repo, task_id, ['store1'], wrapper, enforce_fn) action.remove_importing_stores.assert_called_once_with(['store1']) action.set_image_attribute.assert_not_called() task_repo.get.assert_called_once_with('some-task') task_repo.save.assert_called_once_with(task_repo.get.return_value) class TestImageLock(test_utils.BaseTestCase): def setUp(self): super(TestImageLock, self).setUp() self.img_repo = mock.MagicMock() @mock.patch('glance.async_.flows.api_image_import.LOG') def test_execute_confirms_lock(self, mock_log): self.img_repo.get.return_value.extra_properties = { 'os_glance_import_task': TASK_ID1} wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1, TASK_ID1) imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper) imagelock.execute() mock_log.debug.assert_called_once_with('Image %(image)s import task ' '%(task)s lock confirmed', {'image': IMAGE_ID1, 'task': TASK_ID1}) @mock.patch('glance.async_.flows.api_image_import.LOG') def test_execute_confirms_lock_not_held(self, mock_log): wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1, TASK_ID1) imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper) self.assertRaises(exception.TaskAbortedError, imagelock.execute) @mock.patch('glance.async_.flows.api_image_import.LOG') def test_revert_drops_lock(self, mock_log): wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1, TASK_ID1) imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper) with mock.patch.object(wrapper, 'drop_lock_for_task') as mock_drop: imagelock.revert(None) mock_drop.assert_called_once_with() mock_log.debug.assert_called_once_with('Image %(image)s import task ' '%(task)s dropped its lock ' 'after failure', {'image': IMAGE_ID1, 'task': TASK_ID1}) @mock.patch('glance.async_.flows.api_image_import.LOG') def test_revert_drops_lock_missing(self, mock_log): wrapper = import_flow.ImportActionWrapper(self.img_repo, IMAGE_ID1, TASK_ID1) imagelock = import_flow._ImageLock(TASK_ID1, TASK_TYPE, wrapper) with mock.patch.object(wrapper, 'drop_lock_for_task') as mock_drop: mock_drop.side_effect = exception.NotFound() imagelock.revert(None) mock_log.warning.assert_called_once_with('Image %(image)s import task ' '%(task)s lost its lock ' 'during 
execution!', {'image': IMAGE_ID1, 'task': TASK_ID1}) class TestImportToStoreTask(test_utils.BaseTestCase): def setUp(self): super(TestImportToStoreTask, self).setUp() self.gateway = gateway.Gateway() self.context = context.RequestContext(user_id=TENANT1, project_id=TENANT1, overwrite=False) self.img_factory = self.gateway.get_image_factory(self.context) def test_execute(self): wrapper = mock.MagicMock() action = mock.MagicMock() task_repo = mock.MagicMock() wrapper.__enter__.return_value = action image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", False, True) # Assert file_path is honored with mock.patch.object(image_import, '_execute') as mock_execute: image_import.execute(mock.sentinel.path) mock_execute.assert_called_once_with(action, mock.sentinel.path) # Assert file_path is optional with mock.patch.object(image_import, '_execute') as mock_execute: image_import.execute() mock_execute.assert_called_once_with(action, None) def test_execute_body_with_store(self): image = mock.MagicMock() img_repo = mock.MagicMock() img_repo.get.return_value = image task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", False, True) action = mock.MagicMock() image_import._execute(action, mock.sentinel.path) action.set_image_data.assert_called_once_with( mock.sentinel.path, TASK_ID1, backend='store1', set_active=True, callback=image_import._status_callback) action.remove_importing_stores(['store1']) def test_execute_body_with_store_no_path(self): image = mock.MagicMock() img_repo = mock.MagicMock() img_repo.get.return_value = image task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", False, True) action = mock.MagicMock() image_import._execute(action, None) action.set_image_data.assert_called_once_with( 'http://url', TASK_ID1, backend='store1', set_active=True, callback=image_import._status_callback) action.remove_importing_stores(['store1']) def test_execute_body_without_store(self): image = mock.MagicMock() img_repo = mock.MagicMock() img_repo.get.return_value = image task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", None, False, True) action = mock.MagicMock() image_import._execute(action, mock.sentinel.path) action.set_image_data.assert_called_once_with( mock.sentinel.path, TASK_ID1, backend=None, set_active=True, callback=image_import._status_callback) action.remove_importing_stores.assert_not_called() @mock.patch('glance.async_.flows.api_image_import.LOG.debug') @mock.patch('oslo_utils.timeutils.now') def test_status_callback_limits_rate(self, mock_now, mock_log): img_repo = mock.MagicMock() task_repo = mock.MagicMock() task_repo.get.return_value.status = 'processing' wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", None, False, True) expected_calls = [] log_call = mock.call('Image import %(image_id)s copied %(copied)i MiB', {'image_id': IMAGE_ID1, 'copied': 0}) action = mock.MagicMock(image_id=IMAGE_ID1) mock_now.return_value = 1000 image_import._status_callback(action, 32, 
32) # First call will emit immediately because we only ran __init__ # which sets the last status to zero expected_calls.append(log_call) mock_log.assert_has_calls(expected_calls) image_import._status_callback(action, 32, 64) # Second call will not emit any other logs because no time # has passed mock_log.assert_has_calls(expected_calls) mock_now.return_value += 190 image_import._status_callback(action, 32, 96) # Third call will not emit any other logs because not enough # time has passed mock_log.assert_has_calls(expected_calls) mock_now.return_value += 300 image_import._status_callback(action, 32, 128) # Fourth call will emit because we crossed five minutes expected_calls.append(log_call) mock_log.assert_has_calls(expected_calls) mock_now.return_value += 150 image_import._status_callback(action, 32, 128) # Fifth call will not emit any other logs because not enough # time has passed mock_log.assert_has_calls(expected_calls) mock_now.return_value += 3600 image_import._status_callback(action, 32, 128) # Sixth call will emit because we crossed five minutes expected_calls.append(log_call) mock_log.assert_has_calls(expected_calls) def test_raises_when_image_deleted(self): img_repo = mock.MagicMock() task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", False, True) image = self.img_factory.new_image(image_id=UUID1) image.status = "deleted" img_repo.get.return_value = image self.assertRaises(exception.ImportTaskError, image_import.execute) @mock.patch("glance.async_.flows.api_image_import.image_import") def test_remove_store_from_property(self, mock_import): img_repo = mock.MagicMock() task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", True, True) extra_properties = {"os_glance_importing_to_stores": "store1,store2", "os_glance_import_task": TASK_ID1} image = self.img_factory.new_image(image_id=UUID1, extra_properties=extra_properties) img_repo.get.return_value = image image_import.execute() self.assertEqual( image.extra_properties['os_glance_importing_to_stores'], "store2") def test_revert_updates_status_keys(self): img_repo = mock.MagicMock() task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", True, True) extra_properties = {"os_glance_importing_to_stores": "store1,store2", "os_glance_import_task": TASK_ID1} image = self.img_factory.new_image(image_id=UUID1, extra_properties=extra_properties) img_repo.get.return_value = image fail_key = 'os_glance_failed_import' pend_key = 'os_glance_importing_to_stores' image_import.revert(None) self.assertEqual('store2', image.extra_properties[pend_key]) try: raise Exception('foo') except Exception: fake_exc_info = sys.exc_info() extra_properties = {"os_glance_importing_to_stores": "store1,store2"} image_import.revert(taskflow.types.failure.Failure(fake_exc_info)) self.assertEqual('store2', image.extra_properties[pend_key]) self.assertEqual('store1', image.extra_properties[fail_key]) @mock.patch("glance.async_.flows.api_image_import.image_import") def test_raises_when_all_stores_must_succeed(self, mock_import): img_repo = mock.MagicMock() task_repo = mock.MagicMock() wrapper = 
import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", True, True) extra_properties = {'os_glance_import_task': TASK_ID1} image = self.img_factory.new_image(image_id=UUID1, extra_properties=extra_properties) img_repo.get.return_value = image mock_import.set_image_data.side_effect = \ cursive_exception.SignatureVerificationError( "Signature verification failed") self.assertRaises(cursive_exception.SignatureVerificationError, image_import.execute) @mock.patch("glance.async_.flows.api_image_import.image_import") def test_doesnt_raise_when_not_all_stores_must_succeed(self, mock_import): img_repo = mock.MagicMock() task_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(img_repo, IMAGE_ID1, TASK_ID1) image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, wrapper, "http://url", "store1", False, True) extra_properties = {'os_glance_import_task': TASK_ID1} image = self.img_factory.new_image(image_id=UUID1, extra_properties=extra_properties) img_repo.get.return_value = image mock_import.set_image_data.side_effect = \ cursive_exception.SignatureVerificationError( "Signature verification failed") try: image_import.execute() self.assertEqual(image.extra_properties['os_glance_failed_import'], "store1") except cursive_exception.SignatureVerificationError: self.fail("Exception shouldn't be raised") @mock.patch('glance.common.scripts.utils.get_task') def test_status_callback_updates_task_message(self, mock_get): task_repo = mock.MagicMock() image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, mock.MagicMock(), "http://url", "store1", False, True) task = mock.MagicMock() task.status = 'processing' mock_get.return_value = task action = mock.MagicMock() image_import._status_callback(action, 128, 256 * units.Mi) mock_get.assert_called_once_with(task_repo, TASK_ID1) task_repo.save.assert_called_once_with(task) self.assertEqual(_('Copied %i MiB' % 256), task.message) @mock.patch('glance.common.scripts.utils.get_task') def test_status_aborts_missing_task(self, mock_get): task_repo = mock.MagicMock() image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, mock.MagicMock(), "http://url", "store1", False, True) mock_get.return_value = None action = mock.MagicMock() self.assertRaises(exception.TaskNotFound, image_import._status_callback, action, 128, 256 * units.Mi) mock_get.assert_called_once_with(task_repo, TASK_ID1) task_repo.save.assert_not_called() @mock.patch('glance.common.scripts.utils.get_task') def test_status_aborts_invalid_task_state(self, mock_get): task_repo = mock.MagicMock() image_import = import_flow._ImportToStore(TASK_ID1, TASK_TYPE, task_repo, mock.MagicMock(), "http://url", "store1", False, True) task = mock.MagicMock() task.status = 'failed' mock_get.return_value = task action = mock.MagicMock() self.assertRaises(exception.TaskAbortedError, image_import._status_callback, action, 128, 256 * units.Mi) mock_get.assert_called_once_with(task_repo, TASK_ID1) task_repo.save.assert_not_called() class TestDeleteFromFS(test_utils.BaseTestCase): def test_delete_with_backends_deletes(self): task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE) self.config(enabled_backends='file:foo') with mock.patch.object(import_flow.store_api, 'delete') as mock_del: task.execute(mock.sentinel.path) mock_del.assert_called_once_with( mock.sentinel.path, 'os_glance_staging_store') def test_delete_with_backends_delete_fails(self): 
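        # Staging cleanup must tolerate an already-deleted object: a
        # NotFound from the store is swallowed, while unexpected errors
        # must bubble up (both cases are asserted below).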
self.config(enabled_backends='file:foo') task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE) with mock.patch.object(import_flow.store_api, 'delete') as mock_del: mock_del.side_effect = store_exceptions.NotFound(image=IMAGE_ID1, message='Testing') # If we didn't swallow this we would explode here task.execute(mock.sentinel.path) mock_del.assert_called_once_with( mock.sentinel.path, 'os_glance_staging_store') # Raise something unexpected and make sure it bubbles up mock_del.side_effect = RuntimeError self.assertRaises(RuntimeError, task.execute, mock.sentinel.path) @mock.patch('os.path.exists') @mock.patch('os.unlink') def test_delete_without_backends_exists(self, mock_unlink, mock_exists): mock_exists.return_value = True task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE) task.execute('1234567foo') # FIXME(danms): I have no idea why the code arbitrarily snips # the first seven characters from the path. Need a comment or # *something*. mock_unlink.assert_called_once_with('foo') mock_unlink.reset_mock() mock_unlink.side_effect = OSError(123, 'failed') # Make sure we swallow the OSError and don't explode task.execute('1234567foo') @mock.patch('os.path.exists') @mock.patch('os.unlink') def test_delete_without_backends_missing(self, mock_unlink, mock_exists): mock_exists.return_value = False task = import_flow._DeleteFromFS(TASK_ID1, TASK_TYPE) task.execute('foo') mock_unlink.assert_not_called() class TestImportCopyImageTask(test_utils.BaseTestCase): def setUp(self): super(TestImportCopyImageTask, self).setUp() self.context = context.RequestContext(user_id=TENANT1, project_id=TENANT1, overwrite=False) @mock.patch("glance.async_.flows.api_image_import.image_import") @mock.patch('glance_store.get_store_from_store_identifier') def test_init_copy_flow_as_non_owner(self, mock_gs, mock_import): img_repo = mock.MagicMock() admin_repo = mock.MagicMock() fake_req = {"method": {"name": "copy-image"}, "backend": ['cheap']} fake_img = mock.MagicMock() fake_img.id = IMAGE_ID1 fake_img.status = 'active' fake_img.extra_properties = {'os_glance_import_task': TASK_ID1} admin_repo.get.return_value = fake_img import_flow.get_flow(task_id=TASK_ID1, task_type=TASK_TYPE, task_repo=mock.MagicMock(), image_repo=img_repo, admin_repo=admin_repo, image_id=IMAGE_ID1, import_req=fake_req, context=self.context, backend=['cheap']) # Assert that we saved the image with the admin repo instead of the # user-context one at the end of get_flow() when we initialize the # parameters. 
admin_repo.save.assert_called_once_with(fake_img, 'active') img_repo.save.assert_not_called() class TestVerifyImageStateTask(test_utils.BaseTestCase): def test_verify_active_status(self): fake_img = mock.MagicMock(status='active', extra_properties={ 'os_glance_import_task': TASK_ID1}) mock_repo = mock.MagicMock() mock_repo.get.return_value = fake_img wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) task = import_flow._VerifyImageState(TASK_ID1, TASK_TYPE, wrapper, 'anything!') task.execute() fake_img.status = 'importing' self.assertRaises(import_flow._NoStoresSucceeded, task.execute) def test_revert_copy_status_unchanged(self): wrapper = mock.MagicMock() task = import_flow._VerifyImageState(TASK_ID1, TASK_TYPE, wrapper, 'copy-image') task.revert(mock.sentinel.result) # If we are doing copy-image, no state update should be made wrapper.__enter__.return_value.set_image_attribute.assert_not_called() def test_reverts_state_nocopy(self): wrapper = mock.MagicMock() task = import_flow._VerifyImageState(TASK_ID1, TASK_TYPE, wrapper, 'glance-direct') task.revert(mock.sentinel.result) # Except for copy-image, image state should revert to queued action = wrapper.__enter__.return_value action.set_image_attribute.assert_called_once_with(status='queued') class TestImportActionWrapper(test_utils.BaseTestCase): def test_wrapper_success(self): mock_repo = mock.MagicMock() mock_repo.get.return_value.extra_properties = { 'os_glance_import_task': TASK_ID1} wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) with wrapper as action: self.assertIsInstance(action, import_flow._ImportActions) mock_repo.get.assert_has_calls([mock.call(IMAGE_ID1), mock.call(IMAGE_ID1)]) mock_repo.save.assert_called_once_with( mock_repo.get.return_value, mock_repo.get.return_value.status) def test_wrapper_failure(self): mock_repo = mock.MagicMock() mock_repo.get.return_value.extra_properties = { 'os_glance_import_task': TASK_ID1} wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) class SpecificError(Exception): pass try: with wrapper: raise SpecificError('some failure') except SpecificError: # NOTE(danms): Make sure we only caught the test exception # and aren't hiding anything else pass mock_repo.get.assert_called_once_with(IMAGE_ID1) mock_repo.save.assert_not_called() @mock.patch.object(import_flow, 'LOG') def test_wrapper_logs_status(self, mock_log): mock_repo = mock.MagicMock() mock_image = mock_repo.get.return_value mock_image.extra_properties = {'os_glance_import_task': TASK_ID1} wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) mock_image.status = 'foo' with wrapper as action: action.set_image_attribute(status='bar') mock_log.debug.assert_called_once_with( 'Image %(image_id)s status changing from ' '%(old_status)s to %(new_status)s', {'image_id': IMAGE_ID1, 'old_status': 'foo', 'new_status': 'bar'}) self.assertEqual('bar', mock_image.status) def test_image_id_property(self): mock_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) self.assertEqual(IMAGE_ID1, wrapper.image_id) def test_set_image_attribute(self): mock_repo = mock.MagicMock() mock_image = mock_repo.get.return_value mock_image.extra_properties = {'os_glance_import_task': TASK_ID1} mock_image.status = 'bar' wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) with wrapper as action: action.set_image_attribute(status='foo', virtual_size=123, size=64) mock_repo.save.assert_called_once_with(mock_image, 'bar') 
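        # The save call above carries the image's pre-change status
        # ('bar'); the mutations themselves land on the image object, as
        # checked next.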
self.assertEqual('foo', mock_image.status) self.assertEqual(123, mock_image.virtual_size) self.assertEqual(64, mock_image.size) def test_set_image_attribute_disallowed(self): mock_repo = mock.MagicMock() mock_image = mock_repo.get.return_value mock_image.extra_properties = {'os_glance_import_task': TASK_ID1} mock_image.status = 'bar' wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) with wrapper as action: self.assertRaises(AttributeError, action.set_image_attribute, id='foo') @mock.patch.object(import_flow, 'LOG') def test_set_image_extra_properties(self, mock_log): mock_repo = mock.MagicMock() mock_image = mock_repo.get.return_value mock_image.image_id = IMAGE_ID1 mock_image.extra_properties = {'os_glance_import_task': TASK_ID1} mock_image.status = 'bar' wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) # One banned property with wrapper as action: action.set_image_extra_properties({'os_glance_foo': 'bar'}) self.assertEqual({'os_glance_import_task': TASK_ID1}, mock_image.extra_properties) mock_log.warning.assert_called() mock_log.warning.reset_mock() # Two banned properties with wrapper as action: action.set_image_extra_properties({'os_glance_foo': 'bar', 'os_glance_baz': 'bat'}) self.assertEqual({'os_glance_import_task': TASK_ID1}, mock_image.extra_properties) mock_log.warning.assert_called() mock_log.warning.reset_mock() # One banned and one allowed property with wrapper as action: action.set_image_extra_properties({'foo': 'bar', 'os_glance_foo': 'baz'}) self.assertEqual({'foo': 'bar', 'os_glance_import_task': TASK_ID1}, mock_image.extra_properties) mock_log.warning.assert_called_once_with( 'Dropping %(key)s=%(val)s during metadata injection for %(image)s', {'key': 'os_glance_foo', 'val': 'baz', 'image': IMAGE_ID1}) def test_image_size(self): mock_repo = mock.MagicMock() mock_image = mock_repo.get.return_value mock_image.image_id = IMAGE_ID1 mock_image.extra_properties = {'os_glance_import_task': TASK_ID1} mock_image.size = 123 wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) with wrapper as action: self.assertEqual(123, action.image_size) def test_image_locations(self): mock_repo = mock.MagicMock() mock_image = mock_repo.get.return_value mock_image.image_id = IMAGE_ID1 mock_image.extra_properties = {'os_glance_import_task': TASK_ID1} mock_image.locations = {'some': {'complex': ['structure']}} wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) with wrapper as action: self.assertEqual({'some': {'complex': ['structure']}}, action.image_locations) # Mutate our copy action.image_locations['foo'] = 'bar' # Make sure we did not mutate the image itself self.assertEqual({'some': {'complex': ['structure']}}, mock_image.locations) def test_drop_lock_for_task(self): mock_repo = mock.MagicMock() mock_repo.get.return_value.extra_properties = { 'os_glance_import_task': TASK_ID1} wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) wrapper.drop_lock_for_task() mock_repo.delete_property_atomic.assert_called_once_with( mock_repo.get.return_value, 'os_glance_import_task', TASK_ID1) def test_assert_task_lock(self): mock_repo = mock.MagicMock() mock_repo.get.return_value.extra_properties = { 'os_glance_import_task': TASK_ID1} wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) wrapper.assert_task_lock() # Try again with a different task ID and it should fail wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, 'foo') 
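# NOTE: the extra-properties tests above hinge on the reserved 'os_glance_'
# prefix. A rough model of the filtering being asserted (illustrative only,
# not the production code):
#
#     def set_image_extra_properties(self, properties):
#         for key, val in properties.items():
#             if key.startswith('os_glance_'):
#                 LOG.warning('Dropping %(key)s=%(val)s during metadata '
#                             'injection for %(image)s',
#                             {'key': key, 'val': val,
#                              'image': self._image.image_id})
#             else:
#                 self._image.extra_properties[key] = val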
self.assertRaises(exception.TaskAbortedError, wrapper.assert_task_lock) def _grab_image(self, wrapper): with wrapper: pass @mock.patch.object(import_flow, 'LOG') def test_check_task_lock(self, mock_log): mock_repo = mock.MagicMock() wrapper = import_flow.ImportActionWrapper(mock_repo, IMAGE_ID1, TASK_ID1) image = mock.MagicMock(image_id=IMAGE_ID1) image.extra_properties = {'os_glance_import_task': TASK_ID1} mock_repo.get.return_value = image self._grab_image(wrapper) mock_log.error.assert_not_called() image.extra_properties['os_glance_import_task'] = 'somethingelse' self.assertRaises(exception.TaskAbortedError, self._grab_image, wrapper) mock_log.error.assert_called_once_with( 'Image %(image)s import task %(task)s attempted to take action on ' 'image, but other task %(other)s holds the lock; Aborting.', {'image': image.image_id, 'task': TASK_ID1, 'other': 'somethingelse'}) class TestImportActions(test_utils.BaseTestCase): def setUp(self): super(TestImportActions, self).setUp() self.image = mock.MagicMock() self.image.image_id = IMAGE_ID1 self.image.status = 'active' self.image.disk_format = 'raw' self.image.container_format = 'bare' self.image.extra_properties = ExtraProperties({'speed': '88mph'}) self.image.checksum = mock.sentinel.checksum self.image.os_hash_algo = mock.sentinel.hash_algo self.image.os_hash_value = mock.sentinel.hash_value self.image.size = mock.sentinel.size self.actions = import_flow._ImportActions(self.image) def test_image_property_proxies(self): self.assertEqual(IMAGE_ID1, self.actions.image_id) self.assertEqual('active', self.actions.image_status) self.assertEqual('raw', self.actions.image_disk_format) self.assertEqual('bare', self.actions.image_container_format) self.assertEqual({'speed': '88mph'}, self.actions.image_extra_properties) def test_merge_store_list(self): # Addition with no existing property works self.actions.merge_store_list('stores', ['foo', 'bar']) self.assertEqual({'speed': '88mph', 'stores': 'bar,foo'}, self.image.extra_properties) # Addition adds to the list self.actions.merge_store_list('stores', ['baz']) self.assertEqual('bar,baz,foo', self.image.extra_properties['stores']) # Removal preserves the rest self.actions.merge_store_list('stores', ['foo'], subtract=True) self.assertEqual('bar,baz', self.image.extra_properties['stores']) # Duplicates aren't duplicated self.actions.merge_store_list('stores', ['bar']) self.assertEqual('bar,baz', self.image.extra_properties['stores']) # Removing the last store leaves the key empty but present self.actions.merge_store_list('stores', ['baz', 'bar'], subtract=True) self.assertEqual('', self.image.extra_properties['stores']) # Make sure we ignore falsey stores self.actions.merge_store_list('stores', ['', None]) self.assertEqual('', self.image.extra_properties['stores']) @mock.patch.object(import_flow, 'LOG') def test_merge_store_logs_info(self, mock_log): # Removal from non-present key logs debug, but does not fail self.actions.merge_store_list('stores', ['foo,bar'], subtract=True) mock_log.debug.assert_has_calls([ mock.call( 'Stores %(stores)s not in %(key)s for image %(image_id)s', {'image_id': IMAGE_ID1, 'key': 'stores', 'stores': 'foo,bar'}), mock.call( 'Image %(image_id)s %(key)s=%(stores)s', {'image_id': IMAGE_ID1, 'key': 'stores', 'stores': ''}), ]) mock_log.debug.reset_mock() self.actions.merge_store_list('stores', ['foo']) self.assertEqual('foo', self.image.extra_properties['stores']) mock_log.debug.reset_mock() # Removal from a list where store is not present logs debug, # but does not fail 
self.actions.merge_store_list('stores', ['bar'], subtract=True) self.assertEqual('foo', self.image.extra_properties['stores']) mock_log.debug.assert_has_calls([ mock.call( 'Stores %(stores)s not in %(key)s for image %(image_id)s', {'image_id': IMAGE_ID1, 'key': 'stores', 'stores': 'bar'}), mock.call( 'Image %(image_id)s %(key)s=%(stores)s', {'image_id': IMAGE_ID1, 'key': 'stores', 'stores': 'foo'}), ]) def test_store_list_helpers(self): self.actions.add_importing_stores(['foo', 'bar', 'baz']) self.actions.remove_importing_stores(['bar']) self.actions.add_failed_stores(['foo', 'bar']) self.actions.remove_failed_stores(['foo']) self.assertEqual({'speed': '88mph', 'os_glance_importing_to_stores': 'baz,foo', 'os_glance_failed_import': 'bar'}, self.image.extra_properties) @mock.patch.object(image_import, 'set_image_data') def test_set_image_data(self, mock_sid): self.assertEqual(mock_sid.return_value, self.actions.set_image_data( mock.sentinel.uri, mock.sentinel.task_id, mock.sentinel.backend, mock.sentinel.set_active)) mock_sid.assert_called_once_with( self.image, mock.sentinel.uri, mock.sentinel.task_id, backend=mock.sentinel.backend, set_active=mock.sentinel.set_active, callback=None) @mock.patch.object(image_import, 'set_image_data') def test_set_image_data_with_callback(self, mock_sid): def fake_set_image_data(image, uri, task_id, backend=None, set_active=False, callback=None): callback(mock.sentinel.chunk, mock.sentinel.total) mock_sid.side_effect = fake_set_image_data callback = mock.MagicMock() self.actions.set_image_data(mock.sentinel.uri, mock.sentinel.task_id, mock.sentinel.backend, mock.sentinel.set_active, callback=callback) # Make sure our callback was triggered through the functools.partial # to include the original params and the action wrapper callback.assert_called_once_with(self.actions, mock.sentinel.chunk, mock.sentinel.total) def test_remove_location_for_store(self): self.image.locations = [ {}, {'metadata': {}}, {'metadata': {'store': 'foo'}}, {'metadata': {'store': 'bar'}}, ] self.actions.remove_location_for_store('foo') self.assertEqual([{}, {'metadata': {}}, {'metadata': {'store': 'bar'}}], self.image.locations) # Add a second definition for bar and make sure only one is removed self.image.locations.append({'metadata': {'store': 'bar'}}) self.actions.remove_location_for_store('bar') self.assertEqual([{}, {'metadata': {}}, {'metadata': {'store': 'bar'}}], self.image.locations) def test_remove_location_for_store_last_location(self): self.image.locations = [{'metadata': {'store': 'foo'}}] self.actions.remove_location_for_store('foo') self.assertEqual([], self.image.locations) self.assertIsNone(self.image.checksum) self.assertIsNone(self.image.os_hash_algo) self.assertIsNone(self.image.os_hash_value) self.assertIsNone(self.image.size) @mock.patch.object(import_flow, 'LOG') def test_remove_location_for_store_pop_failures(self, mock_log): class TestList(list): def pop(self): pass self.image.locations = TestList([{'metadata': {'store': 'foo'}}]) with mock.patch.object(self.image.locations, 'pop', new_callable=mock.PropertyMock) as mock_pop: mock_pop.side_effect = store_exceptions.NotFound(image='image') self.actions.remove_location_for_store('foo') mock_log.warning.assert_called_once_with( _('Error deleting from store foo when reverting.')) mock_log.warning.reset_mock() mock_pop.side_effect = store_exceptions.Forbidden() self.actions.remove_location_for_store('foo') mock_log.warning.assert_called_once_with( _('Error deleting from store foo when reverting.')) 
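# NOTE: merge_store_list (exercised above) behaves like a sorted,
# comma-separated set, which is why the assertions read 'bar,foo',
# 'bar,baz,foo' and so on. An equivalent pure-Python model under that
# assumption (illustrative, not the production implementation):
#
#     def merge(current, stores, subtract=False):
#         merged = {s for s in current.split(',') if s}
#         stores = {s for s in stores if s}      # falsey entries ignored
#         merged = merged - stores if subtract else merged | stores
#         return ','.join(sorted(merged))
#
#     assert merge('', ['foo', 'bar']) == 'bar,foo'
#     assert merge('bar,baz,foo', ['foo'], subtract=True) == 'bar,baz'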
mock_log.warning.reset_mock()

            mock_pop.side_effect = Exception
            self.actions.remove_location_for_store('foo')
            mock_log.warning.assert_called_once_with(
                _('Unexpected exception when deleting from store foo.'))
            mock_log.warning.reset_mock()

    def test_pop_extra_property(self):
        self.image.extra_properties = {'foo': '1', 'bar': 2}
        # Should remove, if present
        self.actions.pop_extra_property('foo')
        self.assertEqual({'bar': 2}, self.image.extra_properties)
        # Should not raise if missing
        self.actions.pop_extra_property('baz')
        self.assertEqual({'bar': 2}, self.image.extra_properties)


@mock.patch('glance.common.scripts.utils.get_task')
class TestCompleteTask(test_utils.BaseTestCase):
    def setUp(self):
        super(TestCompleteTask, self).setUp()
        self.task_repo = mock.MagicMock()
        self.task = mock.MagicMock()
        self.wrapper = mock.MagicMock(image_id=IMAGE_ID1)

    def test_execute(self, mock_get_task):
        complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
                                             self.task_repo, self.wrapper)
        mock_get_task.return_value = self.task
        complete.execute()
        mock_get_task.assert_called_once_with(self.task_repo, TASK_ID1)
        self.task.succeed.assert_called_once_with({'image_id': IMAGE_ID1})
        self.task_repo.save.assert_called_once_with(self.task)
        self.wrapper.drop_lock_for_task.assert_called_once_with()

    def test_execute_no_task(self, mock_get_task):
        mock_get_task.return_value = None
        complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
                                             self.task_repo, self.wrapper)
        complete.execute()
        self.task_repo.save.assert_not_called()
        self.wrapper.drop_lock_for_task.assert_called_once_with()

    def test_execute_succeed_fails(self, mock_get_task):
        mock_get_task.return_value = self.task
        self.task.succeed.side_effect = Exception('testing')
        complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
                                             self.task_repo, self.wrapper)
        complete.execute()
        self.task.fail.assert_called_once_with(
            _("Error: <class 'Exception'>: testing"))
        self.task_repo.save.assert_called_once_with(self.task)
        self.wrapper.drop_lock_for_task.assert_called_once_with()

    def test_execute_drop_lock_fails(self, mock_get_task):
        mock_get_task.return_value = self.task
        self.wrapper.drop_lock_for_task.side_effect = exception.NotFound()
        complete = import_flow._CompleteTask(TASK_ID1, TASK_TYPE,
                                             self.task_repo, self.wrapper)
        with mock.patch('glance.async_.flows.api_image_import.LOG') as m_log:
            complete.execute()
            m_log.error.assert_called_once_with('Image %(image)s import task '
                                                '%(task)s did not hold the '
                                                'lock upon completion!',
                                                {'image': IMAGE_ID1,
                                                 'task': TASK_ID1})
        self.task.succeed.assert_called_once_with({'image_id': IMAGE_ID1})


class TestImportMetadata(test_utils.BaseTestCase):
    def setUp(self):
        super(TestImportMetadata, self).setUp()
        self.config(extra_properties=[], group="glance_download_properties")
        self.wrapper = mock.MagicMock(image_id=IMAGE_ID1)
        self.context = context.RequestContext(user_id=TENANT1,
                                              project_id=TENANT1,
                                              overwrite=False)
        self.import_req = {
            'method': {
                'glance_region': 'RegionTwo',
                'glance_service_interface': 'public',
                'glance_image_id': IMAGE_ID1
            }
        }

    @mock.patch('urllib.request')
    @mock.patch('glance.async_.flows.api_image_import.json')
    @mock.patch('glance.async_.utils.get_glance_endpoint')
    def test_execute_return_image_size(self, mock_gge, mock_json,
                                       mock_request):
        self.config(extra_properties=['hw:numa_nodes', 'os_hash'],
                    group="glance_download_properties")
        mock_gge.return_value = 'https://other.cloud.foo/image'
        action = self.wrapper.__enter__.return_value
        mock_json.loads.return_value = {
            'status': 'active',
            'disk_format': 'qcow2',
            'container_format': 'bare',
            'hw:numa_nodes': '2',
'os_hash': 'hash', 'extra_metadata': 'hello', 'size': '12345' } task = import_flow._ImportMetadata(TASK_ID1, TASK_TYPE, self.context, self.wrapper, self.import_req) self.assertEqual(12345, task.execute()) mock_request.Request.assert_called_once_with( 'https://other.cloud.foo/image/v2/images/%s' % ( IMAGE_ID1), headers={'X-Auth-Token': self.context.auth_token}) mock_gge.assert_called_once_with(self.context, 'RegionTwo', 'public') action.set_image_attribute.assert_called_once_with( disk_format='qcow2', container_format='bare') action.set_image_extra_properties.assert_called_once_with({ 'hw:numa_nodes': '2', 'os_hash': 'hash' }) @mock.patch('urllib.request') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_execute_fail_no_glance_endpoint(self, mock_gge, mock_request): action = self.wrapper.__enter__.return_value mock_gge.side_effect = exception.GlanceEndpointNotFound( region='RegionTwo', interface='public') task = import_flow._ImportMetadata(TASK_ID1, TASK_TYPE, self.context, self.wrapper, self.import_req) self.assertRaises(exception.GlanceEndpointNotFound, task.execute) action.assert_not_called() mock_request.assert_not_called() @mock.patch('urllib.request') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_execute_fail_remote_glance_unreachable(self, mock_gge, mock_r): action = self.wrapper.__enter__.return_value mock_r.urlopen.side_effect = urllib.error.HTTPError( '/file', 400, 'Test Fail', {}, None) task = import_flow._ImportMetadata(TASK_ID1, TASK_TYPE, self.context, self.wrapper, self.import_req) self.assertRaises(urllib.error.HTTPError, task.execute) action.assert_not_called() @mock.patch('urllib.request') @mock.patch('glance.async_.flows.api_image_import.json') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_execute_invalid_remote_image_state(self, mock_gge, mock_json, mock_request): action = self.wrapper.__enter__.return_value mock_gge.return_value = 'https://other.cloud.foo/image' mock_json.loads.return_value = { 'status': 'queued', } task = import_flow._ImportMetadata(TASK_ID1, TASK_TYPE, self.context, self.wrapper, self.import_req) self.assertRaises(import_flow._InvalidGlanceDownloadImageStatus, task.execute) action.assert_not_called() @mock.patch('urllib.request') @mock.patch('glance.async_.flows.api_image_import.json') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_execute_raise_if_no_size(self, mock_gge, mock_json, mock_request): self.config(extra_properties=['hw:numa_nodes', 'os_hash'], group="glance_download_properties") mock_gge.return_value = 'https://other.cloud.foo/image' action = self.wrapper.__enter__.return_value mock_json.loads.return_value = { 'status': 'active', 'disk_format': 'qcow2', 'container_format': 'bare', 'hw:numa_nodes': '2', 'os_hash': 'hash', 'extra_metadata': 'hello', } task = import_flow._ImportMetadata(TASK_ID1, TASK_TYPE, self.context, self.wrapper, self.import_req) self.assertRaises(exception.ImportTaskError, task.execute) mock_request.Request.assert_called_once_with( 'https://other.cloud.foo/image/v2/images/%s' % ( IMAGE_ID1), headers={'X-Auth-Token': self.context.auth_token}) mock_gge.assert_called_once_with(self.context, 'RegionTwo', 'public') action.set_image_attribute.assert_called_once_with( disk_format='qcow2', container_format='bare') action.set_image_extra_properties.assert_called_once_with({ 'hw:numa_nodes': '2', 'os_hash': 'hash' }) def test_revert_rollback_metadata_value(self): action = self.wrapper.__enter__.return_value task = import_flow._ImportMetadata(TASK_ID1, TASK_TYPE, 
self.context, self.wrapper, self.import_req) task.properties = {'prop1': 'value1', 'prop2': 'value2'} task.old_properties = {'prop1': 'orig_val', 'old_prop': 'old_value'} task.old_attributes = {'container_format': 'bare', 'disk_format': 'qcow2'} task.revert(None) action.set_image_attribute.assert_called_once_with( status='queued', container_format='bare', disk_format='qcow2') action.pop_extra_property.assert_called_once_with('prop2') action.set_image_extra_properties.assert_called_once_with( task.old_properties) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_base_download.py0000664000175000017500000002137500000000000025260 0ustar00zuulzuul00000000000000# Copyright 2022 OVHCloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock from glance_store import backend from oslo_config import cfg from taskflow.types import failure from glance.async_.flows import api_image_import import glance.common.exception from glance import domain import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestBaseDownloadTask(test_utils.BaseTestCase): def setUp(self): super(TestBaseDownloadTask, self).setUp() self.config(node_staging_uri='/tmp/staging') self.image_repo = mock.MagicMock() self.image_id = mock.MagicMock() self.uri = mock.MagicMock() self.plugin_name = 'FakeBaseDownload' self.task_factory = domain.TaskFactory() task_input = { "import_req": { 'method': { 'name': 'web_download', 'uri': 'http://cloud.foo/image.qcow2' } } } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' request_id = 'fake_request_id' user_id = 'fake_user' self.task = self.task_factory.new_task(self.task_type, TENANT1, self.image_id, user_id, request_id, task_time_to_live=task_ttl, task_input=task_input) self.task_id = self.task.task_id self.action_wrapper = api_image_import.ImportActionWrapper( self.image_repo, self.image_id, self.task_id) self.image_repo.get.return_value = mock.MagicMock( extra_properties={'os_glance_import_task': self.task_id}) self.base_download_task = unit_test_utils.FakeBaseDownloadPlugin( self.task.task_id, self.task_type, self.action_wrapper, ['foo'], self.plugin_name) self.base_download_task._path = "/path/to_downloaded_data" def test_base_download_node_staging_uri_is_none(self): self.config(node_staging_uri=None) self.assertRaises(glance.common.exception.BadTaskConfiguration, unit_test_utils.FakeBaseDownloadPlugin, self.task.task_id, self.task_type, self.uri, self.action_wrapper, ['foo']) @mock.patch.object(cfg.ConfigOpts, "set_override") def test_base_download_node_store_initialization_failed( self, mock_override): with mock.patch.object(backend, '_load_store') as mock_load_store: mock_load_store.return_value = None self.assertRaises(glance.common.exception.BadTaskConfiguration, unit_test_utils.FakeBaseDownloadPlugin, self.task.task_id, 
self.task_type, self.uri,
                          self.action_wrapper, ['foo'])
        mock_override.assert_called()

    def test_base_download_delete_staging_image_not_exist(self):
        staging_path = "file:///tmp/staging/temp-image"
        delete_from_fs_task = api_image_import._DeleteFromFS(
            self.task.task_id, self.task_type)
        with mock.patch.object(os.path, "exists") as mock_exists:
            mock_exists.return_value = False
            with mock.patch.object(os, "unlink") as mock_unlink:
                delete_from_fs_task.execute(staging_path)

                self.assertEqual(1, mock_exists.call_count)
                self.assertEqual(0, mock_unlink.call_count)

    @mock.patch.object(os.path, "exists")
    def test_base_download_delete_staging_image_failed(self, mock_exists):
        mock_exists.return_value = True
        staging_path = "file:///tmp/staging/temp-image"
        delete_from_fs_task = api_image_import._DeleteFromFS(
            self.task.task_id, self.task_type)
        with mock.patch.object(os, "unlink") as mock_unlink:
            try:
                delete_from_fs_task.execute(staging_path)
            except OSError:
                self.assertEqual(1, mock_unlink.call_count)

            self.assertEqual(1, mock_exists.call_count)

    @mock.patch.object(os.path, "exists")
    def test_base_download_delete_staging_image_succeed(self, mock_exists):
        mock_exists.return_value = True
        staging_path = "file:///tmp/staging/temp-image"
        delete_from_fs_task = api_image_import._DeleteFromFS(
            self.task.task_id, self.task_type)
        with mock.patch.object(os, "unlink") as mock_unlink:
            delete_from_fs_task.execute(staging_path)

            self.assertEqual(1, mock_exists.call_count)
            self.assertEqual(1, mock_unlink.call_count)

    @mock.patch(
        "glance.async_.flows._internal_plugins.base_download.store_api")
    def test_base_download_revert_with_failure(self, mock_store_api):
        image = self.image_repo.get.return_value
        image.extra_properties['os_glance_importing_to_stores'] = 'foo'
        image.extra_properties['os_glance_failed_import'] = ''

        self.base_download_task.execute = mock.MagicMock(
            side_effect=glance.common.exception.ImportTaskError)
        self.base_download_task.revert(None)
        mock_store_api.delete_from_backend.assert_called_once_with(
            "/path/to_downloaded_data")
        self.assertEqual(1, self.image_repo.save.call_count)
        self.assertEqual(
            '', image.extra_properties['os_glance_importing_to_stores'])
        self.assertEqual(
            'foo', image.extra_properties['os_glance_failed_import'])

    @mock.patch(
        "glance.async_.flows._internal_plugins.base_download.store_api")
    def test_base_download_revert_without_failure_multi_store(
            self, mock_store_api):
        enabled_backends = {
            'fast': 'file',
            'cheap': 'file'
        }
        self.config(enabled_backends=enabled_backends)

        self.base_download_task.revert("/path/to_downloaded_data")
        mock_store_api.delete.assert_called_once_with(
            "/path/to_downloaded_data", None)

    @mock.patch(
        "glance.async_.flows._internal_plugins.base_download.store_api")
    def test_base_download_revert_with_failure_without_path(
            self, mock_store_api):
        image = self.image_repo.get.return_value
        image.status = 'importing'
        image.extra_properties['os_glance_importing_to_stores'] = 'foo'
        image.extra_properties['os_glance_failed_import'] = ''
        result = failure.Failure.from_exception(
            glance.common.exception.ImportTaskError())

        self.base_download_task._path = None
        self.base_download_task.revert(result)
        mock_store_api.delete_from_backend.assert_not_called()

        # NOTE(danms): Since we told revert that we were the problem,
        # we should have updated the image status and moved the stores
        # to the failed list.
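        # NOTE: receiving a taskflow Failure (rather than a staging path or
        # None) tells revert() that this task itself raised; on top of
        # cleaning up any staged data, it is then expected to roll the image
        # back to 'queued' and shift the in-flight stores from
        # os_glance_importing_to_stores over to os_glance_failed_import,
        # which is what the assertions that follow verify.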
self.image_repo.save.assert_called_once_with(image, 'importing') self.assertEqual('queued', image.status) self.assertEqual( '', image.extra_properties['os_glance_importing_to_stores']) self.assertEqual( 'foo', image.extra_properties['os_glance_failed_import']) @mock.patch( "glance.async_.flows._internal_plugins.base_download.store_api") def test_base_download_revert_with_failure_with_path(self, mock_store_api): result = failure.Failure.from_exception( glance.common.exception.ImportTaskError()) self.base_download_task.revert(result) mock_store_api.delete_from_backend.assert_called_once_with( "/path/to_downloaded_data") @mock.patch( "glance.async_.flows._internal_plugins.base_download.store_api") def test_base_download_delete_fails_on_revert(self, mock_store_api): result = failure.Failure.from_exception( glance.common.exception.ImportTaskError()) mock_store_api.delete_from_backend.side_effect = Exception # this will verify that revert does not break because of failure # while deleting data in staging area self.base_download_task.revert(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_convert.py0000664000175000017500000002010400000000000024124 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
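# NOTE: the conversion tests in this module all assert that '-f' appears in
# the qemu-img invocation. The point (bug 1449062) is to pass the source
# format explicitly so qemu-img never probes it from the payload, since a
# crafted image can masquerade as another format. The guarded call looks
# roughly like this (illustrative, not the exact production code):
#
#     processutils.execute('qemu-img', 'convert', '-f', src_format,
#                          '-O', 'qcow2', src_path, dst_path)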
import io
import json
import os
from unittest import mock

import glance_store
from oslo_concurrency import processutils
from oslo_config import cfg

from glance.async_.flows import convert
from glance.async_ import taskflow_executor
from glance.common.scripts import utils as script_utils
from glance.common import utils
from glance import domain
from glance import gateway
import glance.tests.utils as test_utils

CONF = cfg.CONF

UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestImportTask(test_utils.BaseTestCase):
    def setUp(self):
        super(TestImportTask, self).setUp()
        self.work_dir = os.path.join(self.test_dir, 'work_dir')
        utils.safe_mkdirs(self.work_dir)
        self.config(work_dir=self.work_dir, group='task')

        self.context = mock.MagicMock()
        self.img_repo = mock.MagicMock()
        self.task_repo = mock.MagicMock()

        self.gateway = gateway.Gateway()
        self.task_factory = domain.TaskFactory()
        self.img_factory = self.gateway.get_image_factory(self.context)
        self.image = self.img_factory.new_image(image_id=UUID1,
                                                disk_format='raw',
                                                container_format='bare')

        task_input = {
            "import_from": "http://cloud.foo/image.raw",
            "import_from_format": "raw",
            "image_properties": {'disk_format': 'qcow2',
                                 'container_format': 'bare'}
        }
        task_ttl = CONF.task.task_time_to_live

        self.task_type = 'import'
        request_id = 'fake_request_id'
        user_id = 'fake_user'
        self.task = self.task_factory.new_task(self.task_type, TENANT1,
                                               UUID1, user_id, request_id,
                                               task_time_to_live=task_ttl,
                                               task_input=task_input)

        glance_store.register_opts(CONF)
        self.config(default_store='file',
                    stores=['file', 'http'],
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")
        self.config(conversion_format='qcow2',
                    group='taskflow_executor')
        glance_store.create_stores(CONF)

    @mock.patch.object(os, 'unlink')
    def test_convert_success(self, mock_unlink):
        image_convert = convert._Convert(self.task.task_id,
                                         self.task_type,
                                         self.img_repo)

        self.task_repo.get.return_value = self.task
        image_id = mock.sentinel.image_id
        image = mock.MagicMock(image_id=image_id, virtual_size=None)
        self.img_repo.get.return_value = image

        with mock.patch.object(processutils, 'execute') as exc_mock:
            exc_mock.return_value = ("", None)
            with mock.patch.object(os, 'rename') as rm_mock:
                rm_mock.return_value = None
                image_convert.execute(image, 'file:///test/path.raw')

                # NOTE(hemanthm): Asserting that the source format is passed
                # to qemu-utils to avoid inferring the image format. This
                # shields us from an attack vector described at
                # https://bugs.launchpad.net/glance/+bug/1449062/comments/72
                self.assertIn('-f', exc_mock.call_args[0])

    def test_convert_revert_success(self):
        image_convert = convert._Convert(self.task.task_id,
                                         self.task_type,
                                         self.img_repo)

        self.task_repo.get.return_value = self.task
        image_id = mock.sentinel.image_id
        image = mock.MagicMock(image_id=image_id, virtual_size=None)
        self.img_repo.get.return_value = image

        with mock.patch.object(processutils, 'execute') as exc_mock:
            exc_mock.return_value = ("", None)
            with mock.patch.object(os, 'remove') as rmtree_mock:
                rmtree_mock.return_value = None
                image_convert.revert(image, 'file:///tmp/test')

    def test_import_flow_with_convert_and_introspect(self):
        self.config(engine_mode='serial',
                    group='taskflow_executor')
        image = self.img_factory.new_image(image_id=UUID1,
                                           disk_format='raw',
                                           container_format='bare')

        img_factory = mock.MagicMock()

        executor = taskflow_executor.TaskExecutor(
            self.context,
            self.task_repo,
            self.img_repo,
            img_factory)

        self.task_repo.get.return_value = self.task

        def create_image(*args, **kwargs):
            kwargs['image_id'] = UUID1
            return self.img_factory.new_image(*args, **kwargs)

        self.img_repo.get.return_value = image
        img_factory.new_image.side_effect = create_image

        image_path = os.path.join(self.work_dir, image.image_id)

        def fake_execute(*args, **kwargs):
            if 'info' in args:
                # NOTE(flaper87): Make sure the file actually
                # exists. Extra check to verify previous tasks did
                # what they were supposed to do.
                assert os.path.exists(args[3].split("file://")[-1])

                return (json.dumps({
                    "virtual-size": 10737418240,
                    "filename": "/tmp/image.qcow2",
                    "cluster-size": 65536,
                    "format": "qcow2",
                    "actual-size": 373030912,
                    "format-specific": {
                        "type": "qcow2",
                        "data": {
                            "compat": "0.10"
                        }
                    },
                    "dirty-flag": False
                }), None)

            open("%s.converted" % image_path, 'a').close()
            return ("", None)

        with mock.patch.object(script_utils, 'get_image_data_iter') as dmock:
            dmock.return_value = io.BytesIO(b"TEST_IMAGE")

            with mock.patch.object(processutils, 'execute') as exc_mock:
                exc_mock.side_effect = fake_execute
                executor.begin_processing(self.task.task_id)

                # NOTE(flaper87): DeleteFromFS should've deleted this
                # file. Make sure it doesn't exist.
                self.assertFalse(os.path.exists(image_path))

                # NOTE(flaper87): Workdir should be empty after all
                # the tasks have been executed.
                self.assertEqual([], os.listdir(self.work_dir))
                self.assertEqual('qcow2', image.disk_format)
                self.assertEqual(10737418240, image.virtual_size)

                # NOTE(hemanthm): Asserting that the source format is passed
                # to qemu-utils to avoid inferring the image format when
                # converting. This shields us from an attack vector described
                # at https://bugs.launchpad.net/glance/+bug/1449062/comments/72
                #
                # A total of three calls will be made to 'execute': 'info',
                # 'convert' and 'info' towards introspection, conversion and
                # OVF packaging respectively. We care about the 'convert' call
                # here, hence we fetch the 2nd set of args from the args list.
                convert_call_args, _ = exc_mock.call_args_list[1]
                self.assertIn('-f', convert_call_args)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_copy_image.py0000664000175000017500000002215300000000000024566 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import os from unittest import mock import glance_store as store_api from oslo_config import cfg from glance.async_.flows._internal_plugins import copy_image from glance.async_.flows import api_image_import import glance.common.exception as exception from glance import domain import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' FAKEHASHALGO = 'fake-name-for-sha512' CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' RESERVED_STORES = { 'os_glance_staging_store': 'file', } def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'visibility': 'shared', 'properties': {}, 'checksum': None, 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'virtual_size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj class TestCopyImageTask(test_utils.BaseTestCase): def setUp(self): super(TestCopyImageTask, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self._create_images() self.image_repo = mock.MagicMock() self.task_repo = mock.MagicMock() self.image_id = UUID1 self.staging_store = mock.MagicMock() self.task_factory = domain.TaskFactory() task_input = { "import_req": { 'method': { 'name': 'copy-image', }, 'stores': ['fast'] } } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' request_id = 'fake_request_id' user_id = 'fake_user' self.task = self.task_factory.new_task(self.task_type, TENANT1, self.image_id, user_id, request_id, task_time_to_live=task_ttl, task_input=task_input) self.task_id = self.task.task_id self.action_wrapper = api_image_import.ImportActionWrapper( self.image_repo, self.image_id, self.task_id) self.image_repo.get.return_value = mock.MagicMock( extra_properties={'os_glance_import_task': self.task_id}) stores = {'cheap': 'file', 'fast': 'file'} self.config(enabled_backends=stores) store_api.register_store_opts(CONF, reserved_stores=RESERVED_STORES) self.config(default_backend='fast', group='glance_store') store_api.create_multi_stores(CONF, reserved_stores=RESERVED_STORES) def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, name='1', size=512, virtual_size=2048, visibility='public', disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}, locations=[{'url': 'file://%s/%s' % (self.test_dir, UUID1), 'metadata': {'store': 'fast'}, 'status': 'active'}], created_at=DATETIME + datetime.timedelta(seconds=1)), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) @mock.patch.object(store_api, 'get_store_from_store_identifier') def test_copy_image_to_staging_store(self, mock_store_api): mock_store_api.return_value = self.staging_store 
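# NOTE: the partial/complete staging tests below boil down to comparing the
# on-disk size with image.size before re-fetching. A sketch of the decision
# being verified (illustrative, not the exact production code):
#
#     if os.path.exists(staging_path):
#         if os.path.getsize(staging_path) == image.size:
#             return                   # already fully staged; skip download
#         os.unlink(staging_path)      # partial data: discard and re-fetch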
copy_image_task = copy_image._CopyImage( self.task.task_id, self.task_type, self.image_repo, self.action_wrapper) with mock.patch.object(self.image_repo, 'get') as get_mock: get_mock.return_value = mock.MagicMock( image_id=self.images[0]['id'], locations=self.images[0]['locations'], extra_properties={'os_glance_import_task': self.task.task_id}, status=self.images[0]['status'] ) with mock.patch.object(store_api, 'get') as get_data: get_data.return_value = (b"dddd", 4) copy_image_task.execute() self.staging_store.add.assert_called_once() mock_store_api.assert_called_once_with( "os_glance_staging_store") @mock.patch.object(os, 'unlink') @mock.patch.object(os.path, 'getsize') @mock.patch.object(os.path, 'exists') @mock.patch.object(store_api, 'get_store_from_store_identifier') def test_copy_image_to_staging_store_partial_data_exists( self, mock_store_api, mock_exists, mock_getsize, mock_unlink): mock_store_api.return_value = self.staging_store mock_exists.return_value = True mock_getsize.return_value = 3 copy_image_task = copy_image._CopyImage( self.task.task_id, self.task_type, self.image_repo, self.action_wrapper) with mock.patch.object(self.image_repo, 'get') as get_mock: get_mock.return_value = mock.MagicMock( image_id=self.images[0]['id'], locations=self.images[0]['locations'], status=self.images[0]['status'], extra_properties={'os_glance_import_task': self.task.task_id}, size=4 ) with mock.patch.object(store_api, 'get') as get_data: get_data.return_value = (b"dddd", 4) copy_image_task.execute() mock_exists.assert_called_once() mock_getsize.assert_called_once() mock_unlink.assert_called_once() self.staging_store.add.assert_called_once() mock_store_api.assert_called_once_with( "os_glance_staging_store") @mock.patch.object(os, 'unlink') @mock.patch.object(os.path, 'getsize') @mock.patch.object(os.path, 'exists') @mock.patch.object(store_api, 'get_store_from_store_identifier') def test_copy_image_to_staging_store_data_exists( self, mock_store_api, mock_exists, mock_getsize, mock_unlink): mock_store_api.return_value = self.staging_store mock_exists.return_value = True mock_getsize.return_value = 4 copy_image_task = copy_image._CopyImage( self.task.task_id, self.task_type, self.image_repo, self.action_wrapper) with mock.patch.object(self.image_repo, 'get') as get_mock: get_mock.return_value = mock.MagicMock( image_id=self.images[0]['id'], locations=self.images[0]['locations'], status=self.images[0]['status'], extra_properties={'os_glance_import_task': self.task.task_id}, size=4 ) copy_image_task.execute() mock_exists.assert_called_once() mock_store_api.assert_called_once_with( "os_glance_staging_store") mock_getsize.assert_called_once() # As valid image data already exists in staging area # it does not remove it and also does not download # it again to staging area mock_unlink.assert_not_called() self.staging_store.add.assert_not_called() @mock.patch.object(store_api, 'get_store_from_store_identifier') def test_copy_non_existing_image_to_staging_store_(self, mock_store_api): mock_store_api.return_value = self.staging_store copy_image_task = copy_image._CopyImage( self.task.task_id, self.task_type, self.image_repo, self.action_wrapper) with mock.patch.object(self.image_repo, 'get') as get_mock: get_mock.side_effect = exception.NotFound() self.assertRaises(exception.NotFound, copy_image_task.execute) mock_store_api.assert_called_once_with( "os_glance_staging_store") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
glance-29.0.0/glance/tests/unit/async_/flows/test_glance_download.py0000664000175000017500000001726200000000000025577 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import urllib.error from glance_store._drivers import filesystem from oslo_config import cfg from oslo_utils.fixture import uuidsentinel from glance.async_.flows._internal_plugins import glance_download from glance.async_.flows import api_image_import import glance.common.exception import glance.context from glance import domain import glance.tests.utils as test_utils CONF = cfg.CONF TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestGlanceDownloadTask(test_utils.BaseTestCase): def setUp(self): super(TestGlanceDownloadTask, self).setUp() self.config(node_staging_uri='/tmp/staging') self.image_repo = mock.MagicMock() self.image_id = mock.MagicMock() self.uri = mock.MagicMock() self.task_factory = domain.TaskFactory() self.context = glance.context.RequestContext(tenant=TENANT1, auth_token='token') task_input = { "import_req": { 'method': { 'name': 'glance-download', 'glance_image_id': uuidsentinel.remote_image, 'glance_region': 'RegionTwo', 'glance_service_interface': 'public', } } } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' request_id = 'fake_request_id' user_id = 'fake_user' self.task = self.task_factory.new_task(self.task_type, TENANT1, self.image_id, user_id, request_id, task_time_to_live=task_ttl, task_input=task_input) self.task_id = self.task.task_id self.action_wrapper = api_image_import.ImportActionWrapper( self.image_repo, self.image_id, self.task_id) self.image_repo.get.return_value = mock.MagicMock( extra_properties={'os_glance_import_task': self.task_id}) @mock.patch.object(filesystem.Store, 'add') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_glance_download(self, mock_gge, mock_add): mock_gge.return_value = 'https://other.cloud.foo/image' glance_download_task = glance_download._DownloadGlanceImage( self.context, self.task.task_id, self.task_type, self.action_wrapper, ['foo'], 'RegionTwo', uuidsentinel.remote_image, 'public') with mock.patch('urllib.request') as mock_request: mock_add.return_value = ["path", 12345] self.assertEqual(glance_download_task.execute(12345), "path") mock_add.assert_called_once_with( self.image_id, mock_request.urlopen.return_value, 0) mock_request.Request.assert_called_once_with( 'https://other.cloud.foo/image/v2/images/%s/file' % ( uuidsentinel.remote_image), headers={'X-Auth-Token': self.context.auth_token}) mock_gge.assert_called_once_with(self.context, 'RegionTwo', 'public') @mock.patch.object(filesystem.Store, 'add') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_glance_download_failed(self, mock_gge, mock_add): mock_gge.return_value = 'https://other.cloud.foo/image' glance_download_task = glance_download._DownloadGlanceImage( self.context, self.task.task_id, self.task_type, self.action_wrapper, ['foo'], 'RegionTwo', 
uuidsentinel.remote_image, 'public') with mock.patch('urllib.request') as mock_request: mock_request.urlopen.side_effect = urllib.error.HTTPError( '/file', 400, 'Test Fail', {}, None) self.assertRaises(urllib.error.HTTPError, glance_download_task.execute, 12345) mock_add.assert_not_called() mock_request.Request.assert_called_once_with( 'https://other.cloud.foo/image/v2/images/%s/file' % ( uuidsentinel.remote_image), headers={'X-Auth-Token': self.context.auth_token}) mock_gge.assert_called_once_with(self.context, 'RegionTwo', 'public') @mock.patch('urllib.request') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_glance_download_no_glance_endpoint(self, mock_gge, mock_request): mock_gge.side_effect = glance.common.exception.GlanceEndpointNotFound( region='RegionTwo', interface='public') glance_download_task = glance_download._DownloadGlanceImage( self.context, self.task.task_id, self.task_type, self.action_wrapper, ['foo'], 'RegionTwo', uuidsentinel.remote_image, 'public') self.assertRaises(glance.common.exception.GlanceEndpointNotFound, glance_download_task.execute, 12345) mock_request.assert_not_called() @mock.patch.object(filesystem.Store, 'add') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_glance_download_size_mismatch(self, mock_gge, mock_add): mock_gge.return_value = 'https://other.cloud.foo/image' glance_download_task = glance_download._DownloadGlanceImage( self.context, self.task.task_id, self.task_type, self.action_wrapper, ['foo'], 'RegionTwo', uuidsentinel.remote_image, 'public') with mock.patch('urllib.request') as mock_request: mock_add.return_value = ["path", 1] self.assertRaises(glance.common.exception.ImportTaskError, glance_download_task.execute, 12345) mock_add.assert_called_once_with( self.image_id, mock_request.urlopen.return_value, 0) mock_request.Request.assert_called_once_with( 'https://other.cloud.foo/image/v2/images/%s/file' % ( uuidsentinel.remote_image), headers={'X-Auth-Token': self.context.auth_token}) mock_gge.assert_called_once_with(self.context, 'RegionTwo', 'public') @mock.patch('urllib.request') @mock.patch('glance.common.utils.validate_import_uri') @mock.patch('glance.async_.utils.get_glance_endpoint') def test_glance_download_wrong_download_url(self, mock_gge, mock_validate, mock_request): mock_validate.return_value = False mock_gge.return_value = 'https://other.cloud.foo/image' glance_download_task = glance_download._DownloadGlanceImage( self.context, self.task.task_id, self.task_type, self.action_wrapper, ['foo'], 'RegionTwo', uuidsentinel.remote_image, 'public') self.assertRaises(glance.common.exception.ImportTaskError, glance_download_task.execute, 12345) mock_request.assert_not_called() mock_validate.assert_called_once_with( 'https://other.cloud.foo/image/v2/images/%s/file' % ( uuidsentinel.remote_image)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_import.py0000664000175000017500000004564100000000000023773 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import json import os from unittest import mock import urllib import glance_store from oslo_concurrency import processutils as putils from oslo_config import cfg from taskflow import task from taskflow.types import failure import glance.async_.flows.base_import as import_flow from glance.async_ import taskflow_executor from glance.async_ import utils as async_utils from glance.common.scripts.image_import import main as image_import from glance.common.scripts import utils as script_utils from glance.common import utils from glance import context from glance import domain from glance import gateway import glance.tests.utils as test_utils CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class _ErrorTask(task.Task): def execute(self): raise RuntimeError() class TestImportTask(test_utils.BaseTestCase): def setUp(self): super(TestImportTask, self).setUp() glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores(CONF) self.work_dir = os.path.join(self.test_dir, 'work_dir') utils.safe_mkdirs(self.work_dir) self.config(work_dir=self.work_dir, group='task') self.context = context.RequestContext( user_id=TENANT1, project_id=TENANT1, overwrite=False ) self.img_repo = mock.MagicMock() self.task_repo = mock.MagicMock() self.gateway = gateway.Gateway() self.task_factory = domain.TaskFactory() self.img_factory = self.gateway.get_image_factory(self.context) self.image = self.img_factory.new_image(image_id=UUID1, disk_format='qcow2', container_format='bare') task_input = { "import_from": "http://cloud.foo/image.qcow2", "import_from_format": "qcow2", "image_properties": {'disk_format': 'qcow2', 'container_format': 'bare'} } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' request_id = 'fake_request_id' user_id = 'fake_user' self.task = self.task_factory.new_task(self.task_type, TENANT1, UUID1, user_id, request_id, task_time_to_live=task_ttl, task_input=task_input) def _assert_qemu_process_limits(self, exec_mock): # NOTE(hemanthm): Assert that process limits are being applied # on "qemu-img info" calls. See bug #1449062 for more details. 
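# NOTE: QEMU_IMG_PROC_LIMITS is a processutils.ProcessLimits instance
# capping CPU time and address space for the spawned process, so a
# malicious image cannot wedge 'qemu-img info'. The guarded call is
# roughly (illustrative):
#
#     putils.execute('qemu-img', 'info', '--output=json', path,
#                    prlimit=async_utils.QEMU_IMG_PROC_LIMITS)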
kw_args = exec_mock.call_args[1] self.assertIn('prlimit', kw_args) self.assertEqual(async_utils.QEMU_IMG_PROC_LIMITS, kw_args.get('prlimit')) def test_import_flow(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = io.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'format': 'qcow2', }), None) executor.begin_processing(self.task.task_id) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) self.assertTrue(os.path.exists(image_path)) self.assertEqual(1, len(list(self.image.locations))) self.assertEqual("file://%s%s%s" % (self.test_dir, os.sep, self.image.image_id), self.image.locations[0]['url']) self._assert_qemu_process_limits(tmock) def test_import_flow_missing_work_dir(self): self.config(engine_mode='serial', group='taskflow_executor') self.config(work_dir=None, group='task') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = io.BytesIO(b"TEST_IMAGE") with mock.patch.object(import_flow._ImportToFS, 'execute') as emk: executor.begin_processing(self.task.task_id) self.assertFalse(emk.called) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) self.assertTrue(os.path.exists(image_path)) def test_import_flow_invalid_data_file(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = io.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: out = json.dumps({'format-specific': {'data': {'data-file': 'somefile'}}}) tmock.return_value = (out, '') e = self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) self.assertIn('somefile', str(e)) def test_import_flow_revert_import_to_fs(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): 
kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.side_effect = RuntimeError with mock.patch.object(import_flow._ImportToFS, 'revert') as rmock: self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) self.assertTrue(rmock.called) self.assertIsInstance(rmock.call_args[1]['result'], failure.Failure) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) # Note(sabari): The image should not have been uploaded to # the store as the flow failed before ImportToStore Task. self.assertFalse(os.path.exists(image_path)) def test_import_flow_backed_file_import_to_fs(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = io.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'backing-filename': '/etc/password' }), None) with mock.patch.object(import_flow._ImportToFS, 'revert') as rmock: self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) self.assertTrue(rmock.called) self.assertIsInstance(rmock.call_args[1]['result'], failure.Failure) self._assert_qemu_process_limits(tmock) image_path = os.path.join(self.test_dir, self.image.image_id) fname = "%s.tasks_import" % image_path tmp_image_path = os.path.join(self.work_dir, fname) self.assertFalse(os.path.exists(tmp_image_path)) # Note(sabari): The image should not have been uploaded to # the store as the flow failed before ImportToStore Task. self.assertFalse(os.path.exists(image_path)) def test_import_flow_revert(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = io.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'format': 'qcow2', }), None) with mock.patch.object(import_flow, "_get_import_flows") as imock: imock.return_value = (x for x in [_ErrorTask()]) self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) self._assert_qemu_process_limits(tmock) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, ("%s.tasks_import" % image_path)) self.assertFalse(os.path.exists(tmp_image_path)) # NOTE(flaper87): Eventually, we want this to be assertTrue # The current issue is there's no way to tell taskflow to # continue on failures. 
That is, revert the subflow but # keep executing the parent flow. Under # discussion/development. self.assertFalse(os.path.exists(image_path)) def test_import_flow_no_import_flows(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(urllib.request, 'urlopen') as umock: content = b"TEST_IMAGE" umock.return_value = io.BytesIO(content) with mock.patch.object(import_flow, "_get_import_flows") as imock: imock.return_value = (x for x in []) executor.begin_processing(self.task.task_id) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) self.assertTrue(os.path.exists(image_path)) self.assertEqual(1, umock.call_count) with open(image_path, 'rb') as ifile: self.assertEqual(content, ifile.read()) def test_create_image(self): image_create = import_flow._CreateImage(self.task.task_id, self.task_type, self.task_repo, self.img_repo, self.img_factory) self.task_repo.get.return_value = self.task with mock.patch.object(image_import, 'create_image') as ci_mock: ci_mock.return_value = mock.Mock() image_create.execute() ci_mock.assert_called_once_with(self.img_repo, self.img_factory, {'container_format': 'bare', 'disk_format': 'qcow2'}, self.task.task_id) def test_save_image(self): save_image = import_flow._SaveImage(self.task.task_id, self.task_type, self.img_repo) with mock.patch.object(self.img_repo, 'get') as get_mock: image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id, status='saving') get_mock.return_value = image with mock.patch.object(self.img_repo, 'save') as save_mock: save_image.execute(image.image_id) get_mock.assert_called_once_with(image_id) save_mock.assert_called_once_with(image) self.assertEqual('active', image.status) def test_import_to_fs(self): import_fs = import_flow._ImportToFS(self.task.task_id, self.task_type, self.task_repo, 'http://example.com/image.qcow2') with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: content = b"test" dmock.return_value = [content] with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'format': 'qcow2', }), None) image_id = UUID1 path = import_fs.execute(image_id) reader, size = glance_store.get_from_backend(path) self.assertEqual(4, size) self.assertEqual(content, b"".join(reader)) image_path = os.path.join(self.work_dir, image_id) tmp_image_path = os.path.join(self.work_dir, image_path) self.assertTrue(os.path.exists(tmp_image_path)) self._assert_qemu_process_limits(tmock) def test_delete_from_fs(self): delete_fs = import_flow._DeleteFromFS(self.task.task_id, self.task_type) data = [b"test"] store = glance_store.get_store_from_scheme('file') path = glance_store.store_add_to_backend(mock.sentinel.image_id, data, mock.sentinel.image_size, store, context=None)[0] path_wo_scheme = path.split("file://")[1] self.assertTrue(os.path.exists(path_wo_scheme)) delete_fs.execute(path) self.assertFalse(os.path.exists(path_wo_scheme)) def test_complete_task(self): complete_task = import_flow._CompleteTask(self.task.task_id, self.task_type, 
                                                  self.task_repo)
        image_id = mock.sentinel.image_id
        image = mock.MagicMock(image_id=image_id)
        self.task_repo.get.return_value = self.task

        with mock.patch.object(self.task, 'succeed') as succeed:
            complete_task.execute(image.image_id)

        succeed.assert_called_once_with({'image_id': image_id})
glance-29.0.0/glance/tests/unit/async_/flows/test_introspect.py
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
from unittest import mock

import glance_store
from oslo_concurrency import processutils
from oslo_config import cfg

from glance.async_.flows import introspect
from glance.async_ import utils as async_utils
from glance import domain
import glance.tests.utils as test_utils

CONF = cfg.CONF

UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestImportTask(test_utils.BaseTestCase):

    def setUp(self):
        super(TestImportTask, self).setUp()
        self.task_factory = domain.TaskFactory()
        task_input = {
            "import_from": "http://cloud.foo/image.qcow2",
            "import_from_format": "qcow2",
            "image_properties": mock.sentinel.image_properties
        }
        task_ttl = CONF.task.task_time_to_live

        self.task_type = 'import'
        image_id = 'fake_image_id'
        user_id = 'fake_user'
        request_id = 'fake_request_id'
        self.task = self.task_factory.new_task(self.task_type, TENANT1,
                                               image_id, user_id, request_id,
                                               task_time_to_live=task_ttl,
                                               task_input=task_input)

        self.context = mock.Mock()
        self.img_repo = mock.Mock()
        self.task_repo = mock.Mock()
        self.img_factory = mock.Mock()

        glance_store.register_opts(CONF)
        self.config(default_store='file',
                    stores=['file', 'http'],
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")
        glance_store.create_stores(CONF)

    def test_introspect_success(self):
        image_create = introspect._Introspect(self.task.task_id,
                                              self.task_type,
                                              self.img_repo)
        self.task_repo.get.return_value = self.task
        image_id = mock.sentinel.image_id
        image = mock.MagicMock(image_id=image_id)
        self.img_repo.get.return_value = image

        with mock.patch.object(processutils, 'execute') as exc_mock:
            result = json.dumps({
                "virtual-size": 10737418240,
                "filename": "/tmp/image.qcow2",
                "cluster-size": 65536,
                "format": "qcow2",
                "actual-size": 373030912,
                "format-specific": {
                    "type": "qcow2",
                    "data": {
                        "compat": "0.10"
                    }
                },
                "dirty-flag": False
            })
            exc_mock.return_value = (result, None)
            image_create.execute(image, '/test/path.qcow2')
            self.assertEqual(10737418240, image.virtual_size)

            # NOTE(hemanthm): Assert that process limits are being applied on
            # "qemu-img info" calls. See bug #1449062 for more details.
            kw_args = exc_mock.call_args[1]
            self.assertIn('prlimit', kw_args)
            self.assertEqual(async_utils.QEMU_IMG_PROC_LIMITS,
                             kw_args.get('prlimit'))

    def test_introspect_no_image(self):
        image_create = introspect._Introspect(self.task.task_id,
                                              self.task_type,
                                              self.img_repo)
        self.task_repo.get.return_value = self.task
        image_id = mock.sentinel.image_id
        image = mock.MagicMock(image_id=image_id, virtual_size=None)
        self.img_repo.get.return_value = image

        # NOTE(flaper87): Don't mock, test the error.
        with mock.patch.object(processutils, 'execute') as exc_mock:
            exc_mock.return_value = (None, "some error")
            # NOTE(flaper87): Pls, read the `OptionalTask._catch_all`
            # docs to know why this is commented.
            # self.assertRaises(RuntimeError,
            #                   image_create.execute,
            #                   image, '/test/path.qcow2')
            image_create.execute(image, '/test/path.qcow2')
            self.assertIsNone(image.virtual_size)
glance-29.0.0/glance/tests/unit/async_/flows/test_location_import.py
# Copyright 2024 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib
import io
from unittest import mock

import glance_store as store
from oslo_config import cfg
from oslo_utils import units

import glance.async_.flows.location_import as import_flow
from glance.common import exception
from glance import context
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils

CONF = cfg.CONF

BASE_URI = unit_test_utils.BASE_URI
TASK_TYPE = 'location_import'
TASK_ID1 = 'dbbe7231-020f-4311-87e1-5aaa6da56c02'
IMAGE_ID1 = '41f5b3b0-f54c-4cef-bd45-ce3e376a142f'
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestCalculateHashTask(test_utils.BaseTestCase):

    def setUp(self):
        super(TestCalculateHashTask, self).setUp()
        self.task_repo = mock.MagicMock()
        self.task = mock.MagicMock()
        self.hash_task_input = {
            'image_id': IMAGE_ID1,
        }
        self.image_repo = mock.MagicMock()
        self.image = self.image_repo.get.return_value
        self.image.image_id = IMAGE_ID1
        self.image.disk_format = 'raw'
        self.image.container_format = 'bare'
        self.config(do_secure_hash=True)
        self.config(http_retries='3')
        self.context = context.RequestContext(user_id=TENANT1,
                                              project_id=TENANT1,
                                              overwrite=False)

    def test_execute_calculate_hash(self):
        self.loc_url = '%s/fake_location_1' % (BASE_URI)
        self.image.status = 'queued'
        hashing_algo = CONF.hashing_algorithm
        location_update = import_flow._UpdateLocationTask(
            TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url,
            self.context)
        location_update.execute()
        self.assertEqual(1, self.image.locations.append.call_count)

        set_image_active = import_flow._SetImageToActiveTask(
            TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1)
        set_image_active.execute()
        self.assertEqual('active', self.image.status)

        hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE,
                                                      self.image_repo,
                                                      IMAGE_ID1,
hashing_algo) hash_calculation.execute() self.assertIsNotNone(self.image.checksum) self.assertIsNotNone(self.image.os_hash_algo) self.assertIsNotNone(self.image.os_hash_value) self.assertEqual('active', self.image.status) def test_hash_calculation_retry_count(self): hashing_algo = CONF.hashing_algorithm self.image.checksum = None self.image.os_hash_value = None hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo, status='importing') self.image.get_data.side_effect = IOError self.config(http_retries='10') expected_msg = ("Hash calculation failed for image .* data") self.assertRaisesRegex(import_flow._HashCalculationFailed, expected_msg, hash_calculation.execute) self.assertEqual(CONF.http_retries, self.image.get_data.call_count) self.assertEqual(CONF.hashing_algorithm, self.image.os_hash_algo) self.assertIsNone(self.image.checksum) self.assertIsNone(self.image.os_hash_value) hash_calculation.revert(None) self.assertIsNone(self.image.os_hash_algo) def test_execute_hash_calculation_fails_without_validation_data(self): self.loc_url = '%s/fake_location_1' % (BASE_URI) self.image.status = 'queued' self.hash_task_input.update(loc_url=self.loc_url) self.image.checksum = None self.image.os_hash_value = None location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url, self.context) location_update.execute() self.assertEqual(1, self.image.locations.append.call_count) # Since Image is mocked here, self.image.locations will not be # set hence setting it here to check that it's not popped out # even after CalculateHash failure self.image.locations = ['%s/fake_location_1' % (BASE_URI)] set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) hashing_algo = CONF.hashing_algorithm hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo) self.image.get_data.side_effect = IOError with mock.patch.object(import_flow.LOG, 'debug') as mock_debug: hash_calculation.execute() debug_logs = mock_debug.call_args_list self.assertIn(("[%i/%i] Hash calculation failed due to %s", 1, 3, ''), debug_logs[0]) self.assertEqual(CONF.hashing_algorithm, self.image.os_hash_algo) self.assertIsNone(self.image.checksum) self.assertIsNone(self.image.os_hash_value) self.assertEqual('active', self.image.status) self.assertEqual(1, len(self.image.locations)) hash_calculation.revert(None) self.assertIsNone(self.image.os_hash_algo) self.assertEqual('active', self.image.status) self.assertEqual(1, len(self.image.locations)) # Hash Calculation failed when image is 'active'. 
# exception will not be raised instead there will be warning log self.image.get_data.side_effect = IOError with mock.patch.object(import_flow.LOG, 'warning') as mock_warn: hash_calculation.execute() msg = ("Hash calculation failed for image %s data" % IMAGE_ID1) mock_warn.assert_called_once_with(msg) self.assertEqual(CONF.hashing_algorithm, self.image.os_hash_algo) self.assertIsNone(self.image.checksum) self.assertIsNone(self.image.os_hash_value) self.assertEqual('active', self.image.status) self.assertEqual(1, len(self.image.locations)) hash_calculation.revert(None) self.assertIsNone(self.image.os_hash_algo) self.assertEqual('active', self.image.status) self.assertEqual(1, len(self.image.locations)) def test_execute_hash_calculation_fails_for_store_other_that_http(self): self.loc_url = "cinder://image/fake_location" self.hash_task_input.update(loc_url=self.loc_url) self.image.status = 'queued' self.image.checksum = None self.image.os_hash_value = None location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url, self.context) location_update.execute() self.assertEqual(1, self.image.locations.append.call_count) # Since Image is mocked here, self.image.locations will not be # set hence setting it here to check that it's not popped out # even after CalculateHash failure self.image.locations = [{'url': 'cinder://image/fake_location'}] hashing_algo = CONF.hashing_algorithm hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo, status='importing') self.image.get_data.side_effect = IOError expected_msg = ("Hash calculation failed for image .* data") self.assertRaisesRegex(import_flow._HashCalculationFailed, expected_msg, hash_calculation.execute) self.assertEqual(CONF.hashing_algorithm, self.image.os_hash_algo) self.assertIsNone(self.image.checksum) self.assertIsNone(self.image.os_hash_value) self.assertEqual('importing', self.image.status) self.assertEqual(1, len(self.image.locations)) hash_calculation.revert(None) self.assertIsNone(self.image.os_hash_algo) self.assertEqual('queued', self.image.status) self.assertEqual(0, len(self.image.locations)) def test_execute_hash_calculation_fails_if_image_data_deleted(self): self.loc_url = '%s/fake_location_1' % (BASE_URI) self.image.status = 'queued' self.hash_task_input.update(loc_url=self.loc_url) self.image.checksum = None self.image.os_hash_value = None location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url, self.context) location_update.execute() self.assertEqual(1, self.image.locations.append.call_count) set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) hashing_algo = CONF.hashing_algorithm hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo) self.image.get_data.side_effect = store.exceptions.NotFound hash_calculation.execute() # Check if Image delete and image_repo.delete has been called # if exception raised self.image.delete.assert_called_once() self.image_repo.remove.assert_called_once_with(self.image) class TestVerifyValidationDataTask(test_utils.BaseTestCase): def setUp(self): super(TestVerifyValidationDataTask, self).setUp() self.task_repo = mock.MagicMock() self.task = mock.MagicMock() self.val_data_task_input = { 'image_id': IMAGE_ID1, } self.image_repo = mock.MagicMock() self.image = 
self.image_repo.get.return_value self.image.image_id = IMAGE_ID1 self.image.disk_format = 'raw' self.image.container_format = 'bare' self.config(do_secure_hash=True) def test_execute_with_valid_validation_data(self): url = '%s/fake_location_1' % BASE_URI self.image.status = 'queued' self.image.locations = {"url": url, "metadata": {"store": "foo"}} expected_size = 4 * units.Ki expected_data = b"*" * expected_size self.image.get_data.return_value = io.BytesIO(expected_data) hash_value = hashlib.sha512(expected_data).hexdigest() hashing_algo = CONF.hashing_algorithm self.image.checksum = None self.image.os_hash_value = None val_data = { 'os_hash_algo': hashing_algo, 'os_hash_value': hash_value } self.val_data_task_input.update(val_data=val_data) hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo, status='importing') hash_calculation.execute() self.image.os_hash_algo = val_data.get("os_hash_algo", hashing_algo) verify_validation_data = import_flow._VerifyValidationData( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, val_data) verify_validation_data.execute() self.assertEqual('sha512', self.image.os_hash_algo) self.assertEqual(hash_value, self.image.os_hash_value) self.assertEqual('importing', self.image.status) set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) def test_execute_with_os_hash_value_other_than_512(self): url = '%s/fake_location_1' % BASE_URI self.image.status = 'queued' self.image.locations = {"url": url, "metadata": {"store": "foo"}} expected_size = 4 * units.Ki expected_data = b"*" * expected_size self.image.get_data.return_value = io.BytesIO(expected_data) hash_value = hashlib.sha256(expected_data).hexdigest() hashing_algo = 'sha256' self.image.checksum = None self.image.os_hash_value = None val_data = { 'os_hash_algo': 'sha256', 'os_hash_value': hash_value } hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo, status='importing') hash_calculation.execute() self.val_data_task_input.update(val_data=val_data) verify_validation_data = import_flow._VerifyValidationData( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, val_data) verify_validation_data.execute() self.assertEqual('sha256', self.image.os_hash_algo) self.assertEqual(hash_value, self.image.os_hash_value) self.assertEqual('importing', self.image.status) set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) def test_execute_with_invalid_validation_data(self): url = '%s/fake_location_1' % BASE_URI self.image.status = 'queued' self.image.locations = [{"url": url, "metadata": {"store": "foo"}}] expected_size = 4 * units.Ki expected_data = b"*" * expected_size self.image.get_data.return_value = io.BytesIO(expected_data) hashing_algo = CONF.hashing_algorithm val_data = { 'os_hash_algo': hashing_algo, 'os_hash_value': hashlib.sha512(b'image_service').hexdigest() } hash_calculation = import_flow._CalculateHash(TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, hashing_algo, status='importing') hash_calculation.execute() self.assertEqual('importing', self.image.status) self.assertEqual(1, len(self.image.locations)) verify_validation_data = import_flow._VerifyValidationData( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, val_data) expected_msg = ("os_hash_value: .* not 
matched with actual " "os_hash_value: .*") self.assertRaisesRegex(exception.InvalidParameterValue, expected_msg, verify_validation_data.execute) verify_validation_data.revert(None) self.assertIsNone(self.image.os_hash_algo) self.assertIsNone(self.image.os_hash_value) self.assertIsNone(self.image.checksum) self.assertEqual('queued', self.image.status) class TestSetHashValuesTask(test_utils.BaseTestCase): def setUp(self): super(TestSetHashValuesTask, self).setUp() self.task_repo = mock.MagicMock() self.task = mock.MagicMock() self.hash_task_input = { 'image_id': IMAGE_ID1, } self.image_repo = mock.MagicMock() self.image = self.image_repo.get.return_value self.image.image_id = IMAGE_ID1 self.image.disk_format = 'raw' self.image.container_format = 'bare' def test_execute_with_valid_validation_data(self): url = '%s/fake_location_1' % BASE_URI self.image.status = 'queued' self.image.locations = {"url": url, "metadata": {"store": "foo"}} expected_size = 4 * units.Ki expected_data = b"*" * expected_size self.image.get_data.return_value = io.BytesIO(expected_data) hash_value = hashlib.sha512(expected_data).hexdigest() val_data = { 'os_hash_algo': 'sha512', 'os_hash_value': hash_value } self.hash_task_input.update(val_data=val_data) set_hash_data = import_flow._SetHashValues( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, val_data) set_hash_data.execute() self.assertEqual('sha512', self.image.os_hash_algo) self.assertEqual(hash_value, self.image.os_hash_value) self.assertEqual('queued', self.image.status) set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) class TestUpdateLocationTask(test_utils.BaseTestCase): def setUp(self): super(TestUpdateLocationTask, self).setUp() self.task_repo = mock.MagicMock() self.task = mock.MagicMock() self.location_task_input = { 'image_id': IMAGE_ID1, } self.image_repo = mock.MagicMock() self.image = self.image_repo.get.return_value self.image.image_id = IMAGE_ID1 self.image.disk_format = 'raw' self.image.container_format = 'bare' self.context = context.RequestContext(user_id=TENANT1, project_id=TENANT1, overwrite=False) def test_execute_with_valid_location(self): self.loc_url = '%s/fake_location_1' % (BASE_URI) self.image.status = 'queued' self.location_task_input.update(loc_url=self.loc_url) location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url, self.context) location_update.execute() self.assertEqual(1, self.image.locations.append.call_count) set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) def test_execute_with_invalid_location(self): self.image.locations.append.side_effect = exception.BadStoreUri loc_url = 'bogus_url' self.image.status = 'queued' self.location_task_input.update(loc_url=loc_url) location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, loc_url, self.context) self.assertRaises(import_flow._InvalidLocation, location_update.execute) self.assertEqual('queued', self.image.status) class TestSetImageToActiveTask(test_utils.BaseTestCase): def setUp(self): super(TestSetImageToActiveTask, self).setUp() self.task_repo = mock.MagicMock() self.task = mock.MagicMock() self.set_status_task_input = { 'image_id': IMAGE_ID1, } self.image_repo = mock.MagicMock() self.image = self.image_repo.get.return_value 
self.image.image_id = IMAGE_ID1 self.image.disk_format = 'raw' self.image.container_format = 'bare' self.context = context.RequestContext(user_id=TENANT1, project_id=TENANT1, overwrite=False) def test_execute_set_image_to_active_state(self): self.loc_url = '%s/fake_location_1' % (BASE_URI) self.image.status = 'queued' self.set_status_task_input.update(loc_url=self.loc_url) location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url, self.context) location_update.execute() self.assertEqual(1, self.image.locations.append.call_count) self.assertEqual('queued', self.image.status) set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) set_image_active.execute() self.assertEqual('active', self.image.status) def test_execute_set_image_to_active_state_failure(self): self.loc_url = '%s/fake_location_1' % (BASE_URI) self.image.status = 'queued' self.set_status_task_input.update(loc_url=self.loc_url) location_update = import_flow._UpdateLocationTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1, self.loc_url, self.context) location_update.execute() self.assertEqual(1, self.image.locations.append.call_count) self.assertEqual('queued', self.image.status) # Test if image failed while saving to active state self.image_repo.save.side_effect = ValueError set_image_active = import_flow._SetImageToActiveTask( TASK_ID1, TASK_TYPE, self.image_repo, IMAGE_ID1) self.assertRaises(ValueError, set_image_active.execute) # Test revert where location added in previous task is popped # out incase of this task failure which didn't set image status # 'active'. self.image_repo.save.side_effect = None self.image.status = 'queued' set_image_active.revert(None) self.assertEqual(0, self.image.locations.pop.call_count) self.assertEqual('queued', self.image.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_ovf_process.py0000664000175000017500000001571400000000000025007 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
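# The OVA-extraction tests in this module exercise
# ovf_process.OVAImageExtractor, which pulls the OVF descriptor and the
# disk image out of an OVA package (a plain tar archive). The helper
# below is only an illustrative sketch of that pattern, not Glance's
# implementation: `extract_ova_members` is a hypothetical name, and
# `disk_name` is a hypothetical parameter (the real extractor derives
# the disk file name from the References section of the OVF itself).

import tarfile

from defusedxml.ElementTree import fromstring


def extract_ova_members(ova_file_obj, disk_name):
    """Return the parsed OVF descriptor and a disk member of an OVA."""
    # A non-tar payload raises tarfile.ReadError here, which is what
    # test_extract_ova_not_tar expects from the real extractor.
    tar = tarfile.open(fileobj=ova_file_obj)
    ovf_members = [m for m in tar.getmembers() if m.name.endswith('.ovf')]
    if not ovf_members:
        # A package without a descriptor is a hard error, as in
        # test_extract_ova_no_ovf.
        raise RuntimeError('OVF descriptor not found in OVA package')
    # defusedxml guards against malicious XML; invalid XML raises
    # ParseError, as in test_extract_ova_bad_ovf.
    ovf_root = fromstring(tar.extractfile(ovf_members[0]).read())
    # tarfile.getmember raises KeyError for a missing member, matching
    # the behavior that test_extract_ova_no_disk relies on.
    disk = tar.extractfile(tar.getmember(disk_name))
    return ovf_root, disk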
import os.path import shutil import tarfile import tempfile from unittest import mock from defusedxml.ElementTree import ParseError from glance.async_.flows import ovf_process import glance.tests.utils as test_utils from oslo_config import cfg class TestOvfProcessTask(test_utils.BaseTestCase): def setUp(self): super(TestOvfProcessTask, self).setUp() # The glance/tests/var dir containing sample ova packages used # by the tests in this class self.test_ova_dir = os.path.abspath(os.path.join( os.path.dirname(__file__), '../../../', 'var')) self.tempdir = tempfile.mkdtemp() self.config(work_dir=self.tempdir, group="task") # These are the properties that we will extract from the ovf # file contained in a ova package interested_properties = ( '{\n' ' "cim_pasd": [\n' ' "InstructionSetExtensionName",\n' ' "ProcessorArchitecture"]\n' '}\n') self.config_file_name = os.path.join(self.tempdir, 'ovf-metadata.json') with open(self.config_file_name, 'w') as config_file: config_file.write(interested_properties) self.image = mock.Mock() self.image.container_format = 'ova' self.image.context.is_admin = True self.img_repo = mock.Mock() self.img_repo.get.return_value = self.image def tearDown(self): if os.path.exists(self.tempdir): shutil.rmtree(self.tempdir) super(TestOvfProcessTask, self).tearDown() def _copy_ova_to_tmpdir(self, ova_name): # Copies an ova package to the tempdir from which # it will be read by the system-under-test shutil.copy(os.path.join(self.test_ova_dir, ova_name), self.tempdir) return os.path.join(self.tempdir, ova_name) @mock.patch.object(cfg.ConfigOpts, 'find_file') def test_ovf_process_success(self, mock_find_file): mock_find_file.return_value = self.config_file_name ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') ova_uri = 'file://' + ova_file_path oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', self.img_repo) self.assertEqual(ova_uri, oprocess.execute('test_image_id', ova_uri)) # Note that the extracted disk image is overwritten onto the input ova # file with open(ova_file_path, 'rb') as disk_image_file: content = disk_image_file.read() # b'ABCD' is the exact contents of the disk image file # testserver-disk1.vmdk contained in the testserver.ova package used # by this test self.assertEqual(b'ABCD', content) # 'DMTF:x86:VT-d' is the value in the testerver.ovf file in the # testserver.ova package self.image.extra_properties.update.assert_called_once_with( {'cim_pasd_InstructionSetExtensionName': 'DMTF:x86:VT-d'}) self.assertEqual('bare', self.image.container_format) @mock.patch.object(cfg.ConfigOpts, 'find_file') def test_ovf_process_no_config_file(self, mock_find_file): # Mimics a Glance deployment without the ovf-metadata.json file mock_find_file.return_value = None ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') ova_uri = 'file://' + ova_file_path oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', self.img_repo) self.assertEqual(ova_uri, oprocess.execute('test_image_id', ova_uri)) # Note that the extracted disk image is overwritten onto the input # ova file. 
with open(ova_file_path, 'rb') as disk_image_file: content = disk_image_file.read() # b'ABCD' is the exact contents of the disk image file # testserver-disk1.vmdk contained in the testserver.ova package used # by this test self.assertEqual(b'ABCD', content) # No properties must be selected from the ovf file self.image.extra_properties.update.assert_called_once_with({}) self.assertEqual('bare', self.image.container_format) @mock.patch.object(cfg.ConfigOpts, 'find_file') def test_ovf_process_not_admin(self, mock_find_file): mock_find_file.return_value = self.config_file_name ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') ova_uri = 'file://' + ova_file_path self.image.context.is_admin = False oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', self.img_repo) self.assertRaises(RuntimeError, oprocess.execute, 'test_image_id', ova_uri) def test_extract_ova_not_tar(self): # testserver-not-tar.ova package is not in tar format ova_file_path = os.path.join(self.test_ova_dir, 'testserver-not-tar.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(tarfile.ReadError, iextractor.extract, ova_file) def test_extract_ova_no_disk(self): # testserver-no-disk.ova package contains no disk image file ova_file_path = os.path.join(self.test_ova_dir, 'testserver-no-disk.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(KeyError, iextractor.extract, ova_file) def test_extract_ova_no_ovf(self): # testserver-no-ovf.ova package contains no ovf file ova_file_path = os.path.join(self.test_ova_dir, 'testserver-no-ovf.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(RuntimeError, iextractor.extract, ova_file) def test_extract_ova_bad_ovf(self): # testserver-bad-ovf.ova package has an ovf file that contains # invalid xml ova_file_path = os.path.join(self.test_ova_dir, 'testserver-bad-ovf.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(ParseError, iextractor._parse_OVF, ova_file) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/flows/test_web_download.py0000664000175000017500000001472200000000000025121 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
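# The _WebDownload tests in this module pivot on one invariant: when the
# remote server supplies a usable Content-Length header, the byte count
# reported by the store's add() must match it, while a missing or
# unparseable header is simply ignored. The function below is a minimal,
# illustrative sketch of that check, not the plugin's actual code; the
# real task raises glance.common.exception.ImportTaskError rather than
# ValueError on a mismatch.

def check_content_length(headers, bytes_written):
    """Fail when a valid content-length disagrees with what was stored."""
    try:
        expected = int(headers.get('content-length', ''))
    except ValueError:
        # Missing or invalid header: nothing to verify (compare
        # test_web_download_with_invalid_content_length).
        return
    if bytes_written != expected:
        # Compare test_web_download_fails_when_data_size_different.
        raise ValueError('downloaded %d bytes, expected %d'
                         % (bytes_written, expected))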
from unittest import mock

from glance_store._drivers import filesystem
from oslo_config import cfg

from glance.async_.flows._internal_plugins import web_download
from glance.async_.flows import api_image_import
import glance.common.exception
import glance.common.scripts.utils as script_utils
from glance import domain
import glance.tests.utils as test_utils

CONF = cfg.CONF

TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestWebDownloadTask(test_utils.BaseTestCase):

    def setUp(self):
        super(TestWebDownloadTask, self).setUp()
        self.config(node_staging_uri='/tmp/staging')
        self.image_repo = mock.MagicMock()
        self.image_id = mock.MagicMock()
        self.uri = mock.MagicMock()
        self.task_factory = domain.TaskFactory()

        task_input = {
            "import_req": {
                'method': {
                    'name': 'web_download',
                    'uri': 'http://cloud.foo/image.qcow2'
                }
            }
        }
        task_ttl = CONF.task.task_time_to_live

        self.task_type = 'import'
        request_id = 'fake_request_id'
        user_id = 'fake_user'
        self.task = self.task_factory.new_task(self.task_type, TENANT1,
                                               self.image_id, user_id,
                                               request_id,
                                               task_time_to_live=task_ttl,
                                               task_input=task_input)
        self.task_id = self.task.task_id
        self.action_wrapper = api_image_import.ImportActionWrapper(
            self.image_repo, self.image_id, self.task_id)
        self.web_download_task = web_download._WebDownload(
            self.task.task_id, self.task_type, self.uri,
            self.action_wrapper, ['foo'])
        self.image_repo.get.return_value = mock.MagicMock(
            extra_properties={'os_glance_import_task': self.task_id})

    @mock.patch.object(filesystem.Store, 'add')
    def test_web_download(self, mock_add):
        with mock.patch.object(script_utils,
                               'get_image_data_iter') as mock_iter:
            mock_add.return_value = ["path", 4]
            mock_iter.return_value.headers = {}
            self.assertEqual(self.web_download_task.execute(), "path")
            mock_add.assert_called_once_with(self.image_id,
                                             mock_iter.return_value, 0)

    @mock.patch.object(filesystem.Store, 'add')
    def test_web_download_with_content_length(self, mock_add):
        with mock.patch.object(script_utils,
                               'get_image_data_iter') as mock_iter:
            mock_iter.return_value.headers = {'content-length': '4'}
            mock_add.return_value = ["path", 4]
            self.assertEqual(self.web_download_task.execute(), "path")
            mock_add.assert_called_once_with(self.image_id,
                                             mock_iter.return_value, 0)

    @mock.patch.object(filesystem.Store, 'add')
    def test_web_download_with_invalid_content_length(self, mock_add):
        with mock.patch.object(script_utils,
                               'get_image_data_iter') as mock_iter:
            mock_iter.return_value.headers = {'content-length': "not_valid"}
            mock_add.return_value = ["path", 4]
            self.assertEqual(self.web_download_task.execute(), "path")
            mock_add.assert_called_once_with(self.image_id,
                                             mock_iter.return_value, 0)

    @mock.patch.object(filesystem.Store, 'add')
    def test_web_download_fails_when_data_size_different(self, mock_add):
        with mock.patch.object(script_utils,
                               'get_image_data_iter') as mock_iter:
            mock_iter.return_value.headers = {'content-length': '4'}
            mock_add.return_value = ["path", 3]
            self.assertRaises(
                glance.common.exception.ImportTaskError,
                self.web_download_task.execute)

    def test_web_download_failed(self):
        with mock.patch.object(script_utils,
                               "get_image_data_iter") as mock_iter:
            mock_iter.side_effect = glance.common.exception.NotFound
            self.assertRaises(glance.common.exception.NotFound,
                              self.web_download_task.execute)

    @mock.patch.object(filesystem.Store, 'add')
    def test_web_download_check_content_length(self, mock_add):
        with mock.patch.object(script_utils,
                               'get_image_data_iter') as mock_iter:
            mock_add.return_value = ["path", 4]
            mock_iter.return_value.headers = {'content-length': '4'}
self.assertEqual(self.web_download_task.execute(), "path") mock_add.assert_called_once_with(self.image_id, mock_iter.return_value, 0) @mock.patch.object(filesystem.Store, 'add') def test_web_download_invalid_content_length(self, mock_add): with mock.patch.object(script_utils, 'get_image_data_iter') as mock_iter: mock_add.return_value = ["path", 4] mock_iter.return_value.headers = {'content-length': 'not_valid'} self.assertEqual(self.web_download_task.execute(), "path") mock_add.assert_called_once_with(self.image_id, mock_iter.return_value, 0) @mock.patch.object(filesystem.Store, 'add') def test_web_download_wrong_content_length(self, mock_add): with mock.patch.object(script_utils, 'get_image_data_iter') as mock_iter: mock_add.return_value = ["path", 2] mock_iter.return_value.headers = {'content-length': '4'} self.assertRaises(glance.common.exception.ImportTaskError, self.web_download_task.execute) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/test_async.py0000664000175000017500000002734700000000000022447 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import futurist import glance_store as store from oslo_config import cfg from taskflow.patterns import linear_flow import glance.async_ from glance.async_.flows import api_image_import import glance.tests.utils as test_utils CONF = cfg.CONF class TestTaskExecutor(test_utils.BaseTestCase): def setUp(self): super(TestTaskExecutor, self).setUp() self.context = mock.Mock() self.task_repo = mock.Mock() self.image_repo = mock.Mock() self.image_factory = mock.Mock() self.executor = glance.async_.TaskExecutor(self.context, self.task_repo, self.image_repo, self.image_factory) def test_begin_processing(self): # setup task_id = mock.ANY task_type = mock.ANY task = mock.Mock() with mock.patch.object( glance.async_.TaskExecutor, '_run') as mock_run: self.task_repo.get.return_value = task self.executor.begin_processing(task_id) # assert the call mock_run.assert_called_once_with(task_id, task_type) def test_with_admin_repo(self): admin_repo = mock.MagicMock() executor = glance.async_.TaskExecutor(self.context, self.task_repo, self.image_repo, self.image_factory, admin_repo=admin_repo) self.assertEqual(admin_repo, executor.admin_repo) class TestImportTaskFlow(test_utils.BaseTestCase): def setUp(self): super(TestImportTaskFlow, self).setUp() store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") self.config(enabled_import_methods=[ 'glance-direct', 'web-download', 'copy-image']) self.config(node_staging_uri='file:///tmp/staging') store.create_stores(CONF) self.base_flow = ['ImageLock', 'ConfigureStaging', 'ImportToStore', 'DeleteFromFS', 'VerifyImageState', 'CompleteTask'] self.import_plugins = ['Convert_Image', 'Decompress_Image', 'InjectMetadataProperties'] def 
_get_flow(self, import_req=None): inputs = { 'task_id': mock.sentinel.task_id, 'task_type': mock.MagicMock(), 'task_repo': mock.MagicMock(), 'image_repo': mock.MagicMock(), 'image_id': mock.MagicMock(), 'import_req': import_req or mock.MagicMock(), 'context': mock.MagicMock(), } inputs['image_repo'].get.return_value = mock.MagicMock( extra_properties={'os_glance_import_task': mock.sentinel.task_id}) flow = api_image_import.get_flow(**inputs) return flow def _get_flow_tasks(self, flow): flow_comp = [] for c, p in flow.iter_nodes(): if isinstance(c, linear_flow.Flow): flow_comp += self._get_flow_tasks(c) else: name = str(c).split('-') if len(name) > 1: flow_comp.append(name[1]) return flow_comp def test_get_default_flow(self): # This test will ensure that without import plugins # and without internal plugins flow builds with the # base_flow components flow = self._get_flow() flow_comp = self._get_flow_tasks(flow) # assert flow has all the tasks self.assertEqual(len(self.base_flow), len(flow_comp)) for c in self.base_flow: self.assertIn(c, flow_comp) def test_get_flow_web_download_enabled(self): # This test will ensure that without import plugins # and with web-download plugin flow builds with # base_flow components and '_WebDownload' import_req = { 'method': { 'name': 'web-download', 'uri': 'http://cloud.foo/image.qcow2' } } flow = self._get_flow(import_req=import_req) flow_comp = self._get_flow_tasks(flow) # assert flow has all the tasks self.assertEqual(len(self.base_flow) + 1, len(flow_comp)) for c in self.base_flow: self.assertIn(c, flow_comp) self.assertIn('WebDownload', flow_comp) @mock.patch.object(store, 'get_store_from_store_identifier') def test_get_flow_copy_image_enabled(self, mock_store): # This test will ensure that without import plugins # and with copy-image plugin flow builds with # base_flow components and '_CopyImage' import_req = { 'method': { 'name': 'copy-image', 'stores': ['fake-store'] } } mock_store.return_value = mock.Mock() flow = self._get_flow(import_req=import_req) flow_comp = self._get_flow_tasks(flow) # assert flow has all the tasks self.assertEqual(len(self.base_flow) + 1, len(flow_comp)) for c in self.base_flow: self.assertIn(c, flow_comp) self.assertIn('CopyImage', flow_comp) def test_get_flow_with_all_plugins_enabled(self): # This test will ensure that flow includes import plugins # and base flow self.config(image_import_plugins=['image_conversion', 'image_decompression', 'inject_image_metadata'], group='image_import_opts') flow = self._get_flow() flow_comp = self._get_flow_tasks(flow) # assert flow has all the tasks (base_flow + plugins) plugins = CONF.image_import_opts.image_import_plugins self.assertEqual(len(self.base_flow) + len(plugins), len(flow_comp)) for c in self.base_flow: self.assertIn(c, flow_comp) for c in self.import_plugins: self.assertIn(c, flow_comp) @mock.patch.object(store, 'get_store_from_store_identifier') def test_get_flow_copy_image_not_includes_import_plugins( self, mock_store): # This test will ensure that flow does not includes import # plugins as import method is copy image self.config(image_import_plugins=['image_conversion', 'image_decompression', 'inject_image_metadata'], group='image_import_opts') mock_store.return_value = mock.Mock() import_req = { 'method': { 'name': 'copy-image', 'stores': ['fake-store'] } } flow = self._get_flow(import_req=import_req) flow_comp = self._get_flow_tasks(flow) # assert flow has all the tasks (just base and conversion) self.assertEqual(len(self.base_flow) + 1, len(flow_comp)) for c in 
self.base_flow: self.assertIn(c, flow_comp) self.assertIn('CopyImage', flow_comp) @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) class TestSystemThreadPoolModel(test_utils.BaseTestCase): def test_eventlet_model(self): model_cls = glance.async_.EventletThreadPoolModel self.assertEqual(futurist.GreenThreadPoolExecutor, model_cls.get_threadpool_executor_class()) def test_native_model(self): model_cls = glance.async_.NativeThreadPoolModel self.assertEqual(futurist.ThreadPoolExecutor, model_cls.get_threadpool_executor_class()) @mock.patch('glance.async_.ThreadPoolModel.get_threadpool_executor_class') def test_base_model_spawn(self, mock_gte): pool_cls = mock.MagicMock() pool_cls.configure_mock(__name__='fake') mock_gte.return_value = pool_cls model = glance.async_.ThreadPoolModel() result = model.spawn(print, 'foo', bar='baz') pool = pool_cls.return_value # Make sure the default size was passed to the executor pool_cls.assert_called_once_with(1) # Make sure we submitted the function to the executor pool.submit.assert_called_once_with(print, 'foo', bar='baz') # This isn't used anywhere, but make sure we get the future self.assertEqual(pool.submit.return_value, result) def test_model_map(self): model = glance.async_.EventletThreadPoolModel() results = model.map(lambda s: s.upper(), ['a', 'b', 'c']) self.assertEqual(['A', 'B', 'C'], list(results)) @mock.patch('glance.async_.ThreadPoolModel.get_threadpool_executor_class') def test_base_model_init_with_size(self, mock_gte): mock_gte.return_value.__name__ = 'TestModel' with mock.patch.object(glance.async_, 'LOG') as mock_log: glance.async_.ThreadPoolModel(123) mock_log.debug.assert_called_once_with( 'Creating threadpool model %r with size %i', 'TestModel', 123) mock_gte.return_value.assert_called_once_with(123) def test_set_threadpool_model_native(self): glance.async_.set_threadpool_model('native') self.assertEqual(glance.async_.NativeThreadPoolModel, glance.async_._THREADPOOL_MODEL) def test_set_threadpool_model_eventlet(self): glance.async_.set_threadpool_model('eventlet') self.assertEqual(glance.async_.EventletThreadPoolModel, glance.async_._THREADPOOL_MODEL) def test_set_threadpool_model_unknown(self): # Unknown threadpool models are not tolerated self.assertRaises(RuntimeError, glance.async_.set_threadpool_model, 'danthread9000') def test_set_threadpool_model_again(self): # Setting the model to the same thing is fine glance.async_.set_threadpool_model('native') glance.async_.set_threadpool_model('native') def test_set_threadpool_model_different(self): glance.async_.set_threadpool_model('native') # The model cannot be switched at runtime self.assertRaises(RuntimeError, glance.async_.set_threadpool_model, 'eventlet') def test_set_threadpool_model_log(self): with mock.patch.object(glance.async_, 'LOG') as mock_log: glance.async_.set_threadpool_model('eventlet') mock_log.info.assert_called_once_with( 'Threadpool model set to %r', 'EventletThreadPoolModel') def test_get_threadpool_model(self): glance.async_.set_threadpool_model('native') self.assertEqual(glance.async_.NativeThreadPoolModel, glance.async_.get_threadpool_model()) def test_get_threadpool_model_unset(self): # If the model is not set, we get an AssertionError self.assertRaises(AssertionError, glance.async_.get_threadpool_model) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/test_taskflow_executor.py0000664000175000017500000001447000000000000025073 0ustar00zuulzuul00000000000000# 
Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import futurist import glance_store from oslo_config import cfg from taskflow import engines import glance.async_ from glance.async_ import taskflow_executor from glance.common.scripts.image_import import main as image_import from glance import domain import glance.tests.utils as test_utils CONF = cfg.CONF TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestTaskExecutor(test_utils.BaseTestCase): def setUp(self): # NOTE(danms): Makes sure that we have a model set to something glance.async_._THREADPOOL_MODEL = None glance.async_.set_threadpool_model('eventlet') super(TestTaskExecutor, self).setUp() glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores(CONF) self.config(engine_mode='serial', group='taskflow_executor') self.context = mock.Mock() self.task_repo = mock.Mock() self.image_repo = mock.Mock() self.image_factory = mock.Mock() task_input = { "import_from": "http://cloud.foo/image.qcow2", "import_from_format": "qcow2", "image_properties": {'disk_format': 'qcow2', 'container_format': 'bare'} } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' image_id = 'fake-image-id' request_id = 'fake_request_id' user_id = 'fake_user' self.task_factory = domain.TaskFactory() self.task = self.task_factory.new_task(self.task_type, TENANT1, image_id, user_id, request_id, task_time_to_live=task_ttl, task_input=task_input) self.executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.image_repo, self.image_factory) def test_fetch_an_executor_parallel(self): self.config(engine_mode='parallel', group='taskflow_executor') pool = self.executor._fetch_an_executor() self.assertIsInstance(pool, futurist.GreenThreadPoolExecutor) def test_fetch_an_executor_serial(self): pool = self.executor._fetch_an_executor() self.assertIsNone(pool) def test_begin_processing(self): with mock.patch.object(engines, 'load') as load_mock: engine = mock.Mock() load_mock.return_value = engine self.task_repo.get.return_value = self.task self.executor.begin_processing(self.task.task_id) # assert the call self.assertEqual(1, load_mock.call_count) self.assertEqual(1, engine.run.call_count) def test_task_fail(self): with mock.patch.object(engines, 'load') as load_mock: engine = mock.Mock() load_mock.return_value = engine engine.run.side_effect = RuntimeError self.task_repo.get.return_value = self.task self.assertRaises(RuntimeError, self.executor.begin_processing, self.task.task_id) self.assertEqual('failure', self.task.status) self.task_repo.save.assert_called_with(self.task) def test_task_fail_upload(self): with mock.patch.object(image_import, 'set_image_data') as import_mock: import_mock.side_effect = IOError # noqa self.task_repo.get.return_value = self.task self.executor.begin_processing(self.task.task_id) self.assertEqual('failure', self.task.status) 
self.task_repo.save.assert_called_with(self.task) self.assertEqual(1, import_mock.call_count) @mock.patch('stevedore.driver.DriverManager') def test_get_flow_with_admin_repo(self, mock_driver): admin_repo = mock.MagicMock() executor = taskflow_executor.TaskExecutor(self.context, self.task_repo, self.image_repo, self.image_factory, admin_repo=admin_repo) self.assertEqual(mock_driver.return_value.driver, executor._get_flow(self.task)) mock_driver.assert_called_once_with( 'glance.flows', self.task.type, invoke_on_load=True, invoke_kwds={'task_id': self.task.task_id, 'task_type': self.task.type, 'context': self.context, 'task_repo': self.task_repo, 'image_repo': self.image_repo, 'image_factory': self.image_factory, 'backend': None, 'admin_repo': admin_repo, 'uri': 'http://cloud.foo/image.qcow2'}) @mock.patch('stevedore.driver.DriverManager') @mock.patch.object(taskflow_executor, 'LOG') def test_get_flow_fails(self, mock_log, mock_driver): mock_driver.side_effect = IndexError('fail') executor = taskflow_executor.TaskExecutor(self.context, self.task_repo, self.image_repo, self.image_factory) self.assertRaises(IndexError, executor._get_flow, self.task) mock_log.exception.assert_called_once_with( 'Task initialization failed: %s', 'fail') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/async_/test_utils.py0000664000175000017500000000563000000000000022461 0ustar00zuulzuul00000000000000# Copyright 2022 OVHcloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
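# TestGetGlanceEndpoint below feeds a keystone-style service catalog to
# glance.async_.utils.get_glance_endpoint. The lookup it expects boils
# down to the sketch that follows; this is illustrative only (the real
# helper raises exception.GlanceEndpointNotFound rather than
# LookupError, and `find_image_endpoint` is a hypothetical name).

def find_image_endpoint(service_catalog, region, interface):
    """Return the <interface>URL of the image service in a region."""
    for service in service_catalog:
        if service.get('type') != 'image':
            continue
        for endpoint in service.get('endpoints', []):
            if endpoint.get('region') == region:
                # Catalog entries key their URLs as publicURL,
                # internalURL and adminURL.
                return endpoint['%sURL' % interface]
    raise LookupError('no image endpoint in region %s' % region)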
from unittest import mock

from glance.async_ import utils
import glance.common.exception
from glance.tests.unit import base


class TestGetGlanceEndpoint(base.IsolatedUnitTest):

    def setUp(self):
        super(TestGetGlanceEndpoint, self).setUp()
        self.service_catalog = [
            {
                'endpoints': [
                    {
                        'adminURL': 'http://localhost:8080/',
                        'region': 'RegionOne',
                        'internalURL': 'http://internalURL/',
                        'publicURL': 'http://publicURL/',
                    },
                ],
                'type': 'object-store',
            },
            {
                'endpoints': [
                    {
                        'adminURL': 'http://localhost:8080/',
                        'region': 'RegionOne',
                        'internalURL': 'http://RegionOneInternal/',
                        'publicURL': 'http://RegionOnePublic/',
                    },
                ],
                'type': 'image',
            },
            {
                'endpoints': [
                    {
                        'adminURL': 'http://localhost:8080/',
                        'region': 'RegionTwo',
                        'internalURL': 'http://RegionTwoInternal/',
                        'publicURL': 'http://RegionTwoPublic/',
                    },
                ],
                'type': 'image',
            }
        ]
        self.context = mock.MagicMock(service_catalog=self.service_catalog)

    def test_return_matching_glance_endpoint(self):
        self.assertEqual(utils.get_glance_endpoint(self.context,
                                                   'RegionOne',
                                                   'public'),
                         'http://RegionOnePublic/')
        self.assertEqual(utils.get_glance_endpoint(self.context,
                                                   'RegionTwo',
                                                   'internal'),
                         'http://RegionTwoInternal/')

    def test_glance_endpoint_not_found(self):
        self.assertRaises(glance.common.exception.GlanceEndpointNotFound,
                          utils.get_glance_endpoint,
                          self.context,
                          'RegionThree',
                          'public')
glance-29.0.0/glance/tests/unit/base.py
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
from unittest import mock

import glance_store as store
from glance_store._drivers import cinder
from glance_store._drivers import rbd as rbd_store
from glance_store._drivers import swift
from glance_store import location
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import options
from oslo_serialization import jsonutils

from glance.tests import stubs
from glance.tests import utils as test_utils

CONF = cfg.CONF


class StoreClearingUnitTest(test_utils.BaseTestCase):

    def setUp(self):
        super(StoreClearingUnitTest, self).setUp()
        # Ensure stores + locations cleared
        location.SCHEME_TO_CLS_MAP = {}

        self._create_stores()
        self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict())

    def _create_stores(self, passing_config=True):
        """Create known stores.

        :param passing_config: making store driver passes basic
        configurations.
        :returns: the number of how many store drivers been loaded.
""" store.register_opts(CONF) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") store.create_stores(CONF) class MultiStoreClearingUnitTest(test_utils.BaseTestCase): def setUp(self): super(MultiStoreClearingUnitTest, self).setUp() # Ensure stores + locations cleared location.SCHEME_TO_CLS_BACKEND_MAP = {} self._create_multi_stores() self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict()) def _create_multi_stores(self, passing_config=True): """Create known stores. :param passing_config: making store driver passes basic configurations. :returns: the number of how many store drivers been loaded. """ rbd_store.rados = mock.MagicMock() rbd_store.rbd = mock.MagicMock() rbd_store.Store._set_url_prefix = mock.MagicMock() cinder.cinderclient = mock.MagicMock() cinder.Store.get_cinderclient = mock.MagicMock() swift.swiftclient = mock.MagicMock() swift.BaseStore.get_store_connection = mock.MagicMock() self.config(enabled_backends={'fast': 'file', 'cheap': 'file', 'readonly_store': 'http', 'fast-cinder': 'cinder', 'fast-rbd': 'rbd', 'reliable': 'swift'}) store.register_store_opts(CONF) self.config(default_backend='fast', group='glance_store') self.config(filesystem_store_datadir=self.test_dir, filesystem_thin_provisioning=False, filesystem_store_chunk_size=65536, group='fast') self.config(filesystem_store_datadir=self.test_dir2, filesystem_thin_provisioning=False, filesystem_store_chunk_size=65536, group='cheap') self.config(rbd_store_chunk_size=8688388, rbd_store_pool='images', rbd_thin_provisioning=False, group='fast-rbd') self.config(cinder_volume_type='lvmdriver-1', cinder_use_multipath=False, group='fast-cinder') self.config(swift_store_container='glance', swift_store_large_object_size=524288000, swift_store_large_object_chunk_size=204800000, group='reliable') store.create_multi_stores(CONF) class IsolatedUnitTest(StoreClearingUnitTest): """ Unit test case that establishes a mock environment within a testing directory (in isolation) """ def setUp(self): super(IsolatedUnitTest, self).setUp() options.set_defaults(CONF, connection='sqlite://') lockutils.set_defaults(os.path.join(self.test_dir)) self.config(debug=False) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") store.create_stores() def fake_get_conection_type(client): DEFAULT_API_PORT = 9292 if client.port == DEFAULT_API_PORT: return stubs.FakeGlanceConnection self.patcher = mock.patch( 'glance.common.client.BaseClient.get_connection_type', fake_get_conection_type) self.addCleanup(self.patcher.stop) self.patcher.start() def set_policy_rules(self, rules): fap = open(CONF.oslo_policy.policy_file, 'w') fap.write(jsonutils.dumps(rules)) fap.close() class MultiIsolatedUnitTest(MultiStoreClearingUnitTest): """ Unit test case that establishes a mock environment within a testing directory (in isolation) """ def setUp(self): super(MultiIsolatedUnitTest, self).setUp() options.set_defaults(CONF, connection='sqlite://') lockutils.set_defaults(os.path.join(self.test_dir)) self.config(debug=False) def set_policy_rules(self, rules): fap = open(CONF.oslo_policy.policy_file, 'w') fap.write(jsonutils.dumps(rules)) fap.close() def mock_object(self, obj, attr_name, *args, **kwargs): """Use python mock to mock an object attribute Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. 
""" patcher = mock.patch.object(obj, attr_name, *args, **kwargs) result = patcher.start() self.addCleanup(patcher.stop) return result ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9063087 glance-29.0.0/glance/tests/unit/cmd/0000775000175000017500000000000000000000000017173 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/cmd/__init__.py0000664000175000017500000000000000000000000021272 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/cmd/test_status.py0000664000175000017500000000575600000000000022144 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from oslo_config import cfg from oslo_upgradecheck import upgradecheck from glance.cmd.status import Checks from glance.tests import utils as test_utils CONF = cfg.CONF class TestUpgradeChecks(test_utils.BaseTestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() glance_store.register_opts(CONF) self.checker = Checks() def test_sheepdog_removal_no_config(self): self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) def test_sheepdog_removal_enabled_backends(self): self.config(enabled_backends=None) self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) self.config(enabled_backends={}) self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) self.config(enabled_backends={'foo': 'bar'}) self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) self.config(enabled_backends={'sheepdog': 'foobar'}) self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.FAILURE) def test_sheepdog_removal_glance_store_stores(self): self.config(stores=None, group='glance_store') self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) self.config(stores='', group='glance_store') self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) self.config(stores='foo', group='glance_store') self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.SUCCESS) self.config(stores='sheepdog', group='glance_store') self.assertEqual(self.checker._check_sheepdog_store().code, upgradecheck.Code.FAILURE) def test_owner_is_tenant_removal(self): self.config(owner_is_tenant=True) self.assertEqual(self.checker._check_owner_is_tenant().code, upgradecheck.Code.SUCCESS) self.config(owner_is_tenant=False) self.assertEqual(self.checker._check_owner_is_tenant().code, upgradecheck.Code.FAILURE) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.910309 
glance-29.0.0/glance/tests/unit/common/0000775000175000017500000000000000000000000017720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/__init__.py0000664000175000017500000000000000000000000022017 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.910309 glance-29.0.0/glance/tests/unit/common/scripts/0000775000175000017500000000000000000000000021407 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/scripts/__init__.py0000664000175000017500000000000000000000000023506 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.910309 glance-29.0.0/glance/tests/unit/common/scripts/image_import/0000775000175000017500000000000000000000000024063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/scripts/image_import/__init__.py0000664000175000017500000000000000000000000026162 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/scripts/image_import/test_main.py0000664000175000017500000001453300000000000026426 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
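# NOTE: The tests in this module patch module-level attributes with
# mock.patch.object and rely on automatic cleanup, the same idiom the
# mock_object() helper above wraps. A self-contained sketch of that idiom
# (the Greeter class is purely illustrative):
from unittest import mock


class Greeter(object):
    def greet(self):
        return 'hello'


def patch_with_cleanup(testcase):
    # Start the patch and register its undo with the test case, so the
    # mock can never leak into later tests even if this one fails.
    patcher = mock.patch.object(Greeter, 'greet', return_value='mocked')
    mocked = patcher.start()
    testcase.addCleanup(patcher.stop)
    assert Greeter().greet() == 'mocked'
    return mocked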
from unittest import mock import urllib import glance.common.exception as exception from glance.common.scripts.image_import import main as image_import_script from glance.common.scripts import utils from glance.common import store_utils import glance.tests.utils as test_utils class TestImageImport(test_utils.BaseTestCase): def setUp(self): super(TestImageImport, self).setUp() def test_run(self): with mock.patch.object(image_import_script, '_execute') as mock_execute: task_id = mock.ANY context = mock.ANY task_repo = mock.ANY image_repo = mock.ANY image_factory = mock.ANY image_import_script.run(task_id, context, task_repo, image_repo, image_factory) mock_execute.assert_called_once_with(task_id, task_repo, image_repo, image_factory) def test_import_image(self): image_id = mock.ANY image = mock.Mock(image_id=image_id) image_repo = mock.Mock() image_repo.get.return_value = image image_factory = mock.ANY task_input = mock.Mock(image_properties=mock.ANY) uri = mock.ANY with mock.patch.object(image_import_script, 'create_image') as mock_create_image: with mock.patch.object(image_import_script, 'set_image_data') as mock_set_img_data: mock_create_image.return_value = image self.assertEqual( image_id, image_import_script.import_image(image_repo, image_factory, task_input, None, uri)) # Check image is in saving state before image_repo.save called self.assertEqual('saving', image.status) self.assertTrue(image_repo.save.called) mock_set_img_data.assert_called_once_with(image, uri, None) self.assertTrue(image_repo.get.called) self.assertTrue(image_repo.save.called) def test_create_image(self): image = mock.ANY image_repo = mock.Mock() image_factory = mock.Mock() image_factory.new_image.return_value = image # Note: include some base properties to ensure no error while # attempting to verify them image_properties = {'disk_format': 'foo', 'id': 'bar'} self.assertEqual(image, image_import_script.create_image(image_repo, image_factory, image_properties, None)) @mock.patch.object(utils, 'get_image_data_iter') def test_set_image_data_http(self, mock_image_iter): uri = 'http://www.example.com' image = mock.Mock() mock_image_iter.return_value = test_utils.FakeHTTPResponse() self.assertIsNone(image_import_script.set_image_data(image, uri, None)) def test_set_image_data_http_error(self): uri = 'blahhttp://www.example.com' image = mock.Mock() self.assertRaises(urllib.error.URLError, image_import_script.set_image_data, image, uri, None) @mock.patch.object(image_import_script, 'create_image') @mock.patch.object(image_import_script, 'set_image_data') @mock.patch.object(store_utils, 'delete_image_location_from_backend') def test_import_image_failed_with_expired_token( self, mock_delete_data, mock_set_img_data, mock_create_image): image_id = mock.ANY locations = ['location'] image = mock.Mock(image_id=image_id, locations=locations) image_repo = mock.Mock() image_repo.get.side_effect = [image, exception.NotAuthenticated] image_factory = mock.ANY task_input = mock.Mock(image_properties=mock.ANY) uri = mock.ANY mock_create_image.return_value = image self.assertRaises(exception.NotAuthenticated, image_import_script.import_image, image_repo, image_factory, task_input, None, uri) self.assertEqual(1, mock_set_img_data.call_count) mock_delete_data.assert_called_once_with( mock_create_image().context, image_id, 'location') @mock.patch('oslo_utils.timeutils.StopWatch') @mock.patch('glance.common.scripts.utils.get_image_data_iter') def test_set_image_data_with_callback(self, mock_gidi, mock_sw): data = [b'0' * 60, b'0' * 50, b'0' 
* 10, b'0' * 150] result_data = [] mock_gidi.return_value = iter(data) mock_sw.return_value.expired.side_effect = [False, True, False, False] image = mock.MagicMock() callback = mock.MagicMock() def fake_set_data(data_iter, **kwargs): for chunk in data_iter: result_data.append(chunk) image.set_data.side_effect = fake_set_data image_import_script.set_image_data(image, 'http://fake', None, callback=callback) mock_gidi.assert_called_once_with('http://fake') self.assertEqual(data, result_data) # Since we only fired the timer once, only two calls expected # for the four reads we did, including the final obligatory one callback.assert_has_calls([mock.call(110, 110), mock.call(160, 270)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/scripts/test_scripts_utils.py0000664000175000017500000002313700000000000025735 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import urllib from glance.common import exception from glance.common.scripts import utils as script_utils import glance.tests.utils as test_utils class TestScriptsUtils(test_utils.BaseTestCase): def setUp(self): super(TestScriptsUtils, self).setUp() def test_get_task(self): task = mock.ANY task_repo = mock.Mock(return_value=task) task_id = mock.ANY self.assertEqual(task, script_utils.get_task(task_repo, task_id)) def test_unpack_task_input(self): task_input = {"import_from": "foo", "import_from_format": "bar", "image_properties": "baz"} task = mock.Mock(task_input=task_input) self.assertEqual(task_input, script_utils.unpack_task_input(task)) def test_unpack_task_type_location_import(self): task_type = 'location_import' task_input = {'image_id': mock.ANY, 'loc_url': mock.ANY, 'validation_data': {}} task = mock.Mock(type=task_type, task_input=task_input) self.assertEqual(task_input, script_utils.unpack_task_input(task)) def test_unpack_task_type_location_import_error(self): task_type = 'location_import' task_input1 = {'image_id': mock.ANY, 'validation_data': {}} task_input2 = {'loc_url': mock.ANY, 'validation_data': {}} task_input3 = {'image_id': mock.ANY, 'loc_url': mock.ANY} task1 = mock.Mock(type=task_type, task_input=task_input1) task2 = mock.Mock(type=task_type, task_input=task_input2) task3 = mock.Mock(type=task_type, task_input=task_input3) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task1) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task2) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task3) def test_unpack_task_input_error(self): task_input1 = {"import_from_format": "bar", "image_properties": "baz"} task_input2 = {"import_from": "foo", "image_properties": "baz"} task_input3 = {"import_from": "foo", "import_from_format": "bar"} task1 = mock.Mock(task_input=task_input1) task2 = mock.Mock(task_input=task_input2) task3 = mock.Mock(task_input=task_input3) 
self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task1) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task2) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task3) def test_set_base_image_properties(self): properties = {} script_utils.set_base_image_properties(properties) self.assertIn('disk_format', properties) self.assertIn('container_format', properties) self.assertEqual('qcow2', properties['disk_format']) self.assertEqual('bare', properties['container_format']) def test_set_base_image_properties_none(self): properties = None script_utils.set_base_image_properties(properties) self.assertIsNone(properties) def test_set_base_image_properties_not_empty(self): properties = {'disk_format': 'vmdk', 'container_format': 'bare'} script_utils.set_base_image_properties(properties) self.assertIn('disk_format', properties) self.assertIn('container_format', properties) self.assertEqual('vmdk', properties.get('disk_format')) self.assertEqual('bare', properties.get('container_format')) def test_validate_location_http(self): location = 'http://example.com' self.assertEqual(location, script_utils.validate_location_uri(location)) def test_validate_location_https(self): location = 'https://example.com' self.assertEqual(location, script_utils.validate_location_uri(location)) def test_validate_location_none_error(self): self.assertRaises(exception.BadStoreUri, script_utils.validate_location_uri, '') def test_validate_location_file_location_error(self): self.assertRaises(exception.BadStoreUri, script_utils.validate_location_uri, "file:///tmp") self.assertRaises(exception.BadStoreUri, script_utils.validate_location_uri, "filesystem:///tmp") def test_validate_location_unsupported_error(self): location = 'swift' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'swift+http' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'swift+https' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'swift+config' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'vsphere' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'rbd://' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'cinder://' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) class TestCallbackIterator(test_utils.BaseTestCase): def test_iterator_iterates(self): # Include a zero-length generation to make sure we don't trigger # the callback when nothing new has happened. 
items = ['1', '2', '', '3'] callback = mock.MagicMock() cb_iter = script_utils.CallbackIterator(iter(items), callback) iter_items = list(cb_iter) callback.assert_has_calls([mock.call(1, 1), mock.call(1, 2), mock.call(1, 3)]) self.assertEqual(items, iter_items) # Make sure we don't call the callback on close if we # have processed all the data callback.reset_mock() cb_iter.close() callback.assert_not_called() @mock.patch('oslo_utils.timeutils.StopWatch') def test_iterator_iterates_granularly(self, mock_sw): items = ['1', '2', '3'] callback = mock.MagicMock() mock_sw.return_value.expired.side_effect = [False, True, False] cb_iter = script_utils.CallbackIterator(iter(items), callback, min_interval=30) iter_items = list(cb_iter) self.assertEqual(items, iter_items) # The timer only fired once, but we should still expect the final # chunk to be emitted. callback.assert_has_calls([mock.call(2, 2), mock.call(1, 3)]) mock_sw.assert_called_once_with(30) mock_sw.return_value.start.assert_called_once_with() mock_sw.return_value.restart.assert_called_once_with() # Make sure we don't call the callback on close if we # have processed all the data callback.reset_mock() cb_iter.close() callback.assert_not_called() def test_proxy_close(self): callback = mock.MagicMock() source = mock.MagicMock() del source.close # NOTE(danms): This will generate AttributeError if it # tries to call close after the del above. script_utils.CallbackIterator(source, callback).close() source = mock.MagicMock() source.close.return_value = 'foo' script_utils.CallbackIterator(source, callback).close() source.close.assert_called_once_with() # We didn't process any data, so no callback should be expected callback.assert_not_called() @mock.patch('oslo_utils.timeutils.StopWatch') def test_proxy_read(self, mock_sw): items = ['1', '2', '3'] source = mock.MagicMock() source.read.side_effect = items callback = mock.MagicMock() mock_sw.return_value.expired.side_effect = [False, True, False] cb_iter = script_utils.CallbackIterator(source, callback, min_interval=30) results = [cb_iter.read(1) for i in range(len(items))] self.assertEqual(items, results) # The timer only fired once while reading, so we only expect # one callback. callback.assert_has_calls([mock.call(2, 2)]) cb_iter.close() # If we close with residue since the last callback, we should # call the callback with that. callback.assert_has_calls([mock.call(2, 2), mock.call(1, 3)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/test_client.py0000664000175000017500000000543000000000000022611 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
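# NOTE: The TestCallbackIterator cases above pin down a byte-counting proxy:
# it wraps an iterator or file-like source, invokes callback(bytes_since_last
# _call, total_bytes) whenever a timer interval expires, and flushes any
# residue on close(). The real implementation lives in
# glance.common.scripts.utils; this standalone sketch only mirrors the
# behaviour the tests assert and simplifies the timer handling.
from oslo_utils import timeutils


class CountingIterator(object):
    def __init__(self, source, callback, min_interval=None):
        self._source = source
        self._callback = callback
        self._pending = 0   # bytes seen since the last callback
        self._total = 0     # bytes seen overall
        self._timer = None
        if min_interval:
            self._timer = timeutils.StopWatch(min_interval)
            self._timer.start()

    def _account(self, chunk):
        self._pending += len(chunk)
        self._total += len(chunk)
        # Without a timer, report every non-empty chunk; with one, report
        # only when the interval has expired, then restart it.
        if self._pending and (not self._timer or self._timer.expired()):
            self._callback(self._pending, self._total)
            self._pending = 0
            if self._timer:
                self._timer.restart()
        return chunk

    def __iter__(self):
        for chunk in self._source:
            yield self._account(chunk)

    def read(self, size=None):
        return self._account(self._source.read(size))

    def close(self):
        # Report any bytes the callback has not yet seen, then close the
        # source only if it supports closing.
        if self._pending:
            self._callback(self._pending, self._total)
            self._pending = 0
        if hasattr(self._source, 'close'):
            self._source.close()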
import http.client from unittest import mock from oslo_log.fixture import logging_error as log_fixture import testtools from glance.common import auth from glance.common import client from glance.tests.unit import fixtures as glance_fixtures from glance.tests import utils class TestClient(testtools.TestCase): def setUp(self): super(TestClient, self).setUp() self.endpoint = 'example.com' self.client = client.BaseClient(self.endpoint, port=9191, auth_token='abc123') # Limit the amount of DeprecationWarning messages in the unit test logs self.useFixture(glance_fixtures.WarningsFixture()) # Make sure logging output is limited but still test debug formatting self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(glance_fixtures.StandardLogging()) def test_make_auth_plugin(self): creds = {'strategy': 'keystone'} insecure = False with mock.patch.object(auth, 'get_plugin_from_strategy'): self.client.make_auth_plugin(creds, insecure) @mock.patch.object(http.client.HTTPConnection, "getresponse") @mock.patch.object(http.client.HTTPConnection, "request") def test_http_encoding_headers(self, _mock_req, _mock_resp): # Lets fake the response # returned by http.client fake = utils.FakeHTTPResponse(data=b"Ok") _mock_resp.return_value = fake headers = {"test": 'ni\xf1o'} resp = self.client.do_request('GET', '/v1/images/detail', headers=headers) self.assertEqual(fake, resp) @mock.patch.object(http.client.HTTPConnection, "getresponse") @mock.patch.object(http.client.HTTPConnection, "request") def test_http_encoding_params(self, _mock_req, _mock_resp): # Lets fake the response # returned by http.client fake = utils.FakeHTTPResponse(data=b"Ok") _mock_resp.return_value = fake params = {"test": 'ni\xf1o'} resp = self.client.do_request('GET', '/v1/images/detail', params=params) self.assertEqual(fake, resp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/test_config.py0000664000175000017500000001074500000000000022605 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
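# NOTE: The paste-deploy tests below exercise
# glance.common.config.load_paste_app(). Condensed to its essentials, the
# call sequence they wrap looks like this (the /tmp path is illustrative
# only; the tests build their own temporary files):
from glance.common import config

# Point oslo.config at a minimal configuration file, then load the WSGI
# pipeline named 'glance-api' from the matching *-paste.ini alongside it.
config.parse_args(['--config-file', '/tmp/testcfg.conf'])
app = config.load_paste_app('glance-api')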
import os.path import shutil import fixtures import oslo_middleware from glance.api.middleware import context from glance.common import config from glance.tests import utils as test_utils class TestPasteApp(test_utils.BaseTestCase): def setUp(self): super(TestPasteApp, self).setUp() def _do_test_load_paste_app(self, expected_app_type, make_paste_file=True, paste_flavor=None, paste_config_file=None, paste_append=None): def _writeto(path, str): with open(path, 'w') as f: f.write(str or '') f.flush() def _appendto(orig, copy, str): shutil.copy(orig, copy) with open(copy, 'a') as f: f.write(str or '') f.flush() self.config(flavor=paste_flavor, config_file=paste_config_file, group='paste_deploy') temp_dir = self.useFixture(fixtures.TempDir()).path temp_file = os.path.join(temp_dir, 'testcfg.conf') _writeto(temp_file, '[DEFAULT]\n') config.parse_args(['--config-file', temp_file]) paste_to = temp_file.replace('.conf', '-paste.ini') if not paste_config_file and make_paste_file: paste_from = os.path.join(os.getcwd(), 'etc/glance-api-paste.ini') _appendto(paste_from, paste_to, paste_append) app = config.load_paste_app('glance-api') self.assertIsInstance(app['/'], expected_app_type) def test_load_paste_app(self): expected_middleware = oslo_middleware.CORS self._do_test_load_paste_app(expected_middleware) def test_load_paste_app_paste_config_not_found(self): expected_middleware = context.UnauthenticatedContextMiddleware self.assertRaises(RuntimeError, self._do_test_load_paste_app, expected_middleware, make_paste_file=False) def test_load_paste_app_with_paste_flavor(self): pipeline = ('[composite:glance-api-incomplete]\n' 'paste.composite_factory = glance.api:root_app_factory\n' '/: api-incomplete\n' '/healthcheck: healthcheck\n' '[pipeline:api-incomplete]\n' 'pipeline = context rootapp') expected_middleware = context.ContextMiddleware self._do_test_load_paste_app(expected_middleware, paste_flavor='incomplete', paste_append=pipeline) def test_load_paste_app_with_paste_config_file(self): paste_config_file = os.path.join(os.getcwd(), 'etc/glance-api-paste.ini') expected_middleware = oslo_middleware.CORS self._do_test_load_paste_app(expected_middleware, paste_config_file=paste_config_file) def test_load_paste_app_with_paste_config_file_but_not_exist(self): paste_config_file = os.path.abspath("glance-api-paste.ini") expected_middleware = oslo_middleware.Healthcheck self.assertRaises(RuntimeError, self._do_test_load_paste_app, expected_middleware, paste_config_file=paste_config_file) def test_get_path_non_exist(self): self.assertRaises(RuntimeError, config._get_deployment_config_file) class TestDefaultConfig(test_utils.BaseTestCase): def setUp(self): super(TestDefaultConfig, self).setUp() self.CONF = config.cfg.CONF self.CONF.import_group('profiler', 'glance.common.wsgi') def test_osprofiler_disabled(self): self.assertFalse(self.CONF.profiler.enabled) self.assertFalse(self.CONF.profiler.trace_sqlalchemy) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/test_exception.py0000664000175000017500000000401400000000000023326 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client as http from oslo_utils import encodeutils from glance.common import exception from glance.tests import utils as test_utils class GlanceExceptionTestCase(test_utils.BaseTestCase): def test_default_error_msg(self): class FakeGlanceException(exception.GlanceException): message = "default message" exc = FakeGlanceException() self.assertEqual('default message', encodeutils.exception_to_unicode(exc)) def test_specified_error_msg(self): msg = exception.GlanceException('test') self.assertIn('test', encodeutils.exception_to_unicode(msg)) def test_default_error_msg_with_kwargs(self): class FakeGlanceException(exception.GlanceException): message = "default message: %(code)s" exc = FakeGlanceException(code=int(http.INTERNAL_SERVER_ERROR)) self.assertEqual("default message: 500", encodeutils.exception_to_unicode(exc)) def test_specified_error_msg_with_kwargs(self): msg = exception.GlanceException('test: %(code)s', code=int(http.INTERNAL_SERVER_ERROR)) self.assertIn('test: 500', encodeutils.exception_to_unicode(msg)) def test_non_unicode_error_msg(self): exc = exception.GlanceException('test') self.assertIsInstance(encodeutils.exception_to_unicode(exc), str) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/test_format_inspector.py0000664000175000017500000006214500000000000024717 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import os import re import struct import subprocess import tempfile from unittest import mock from oslo_utils import units from glance.common import format_inspector from glance.tests import utils as test_utils TEST_IMAGE_PREFIX = 'glance-unittest-formatinspector-' def get_size_from_qemu_img(filename): output = subprocess.check_output('qemu-img info "%s"' % filename, shell=True) for line in output.split(b'\n'): m = re.search(b'^virtual size: .* .([0-9]+) bytes', line.strip()) if m: return int(m.group(1)) raise Exception('Could not find virtual size with qemu-img') class TestFormatInspectors(test_utils.BaseTestCase): def setUp(self): super(TestFormatInspectors, self).setUp() self._created_files = [] def tearDown(self): super(TestFormatInspectors, self).tearDown() for fn in self._created_files: try: os.remove(fn) except Exception: pass def _create_iso(self, image_size, subformat='9660'): """Create an ISO file of the given size. 
        :param image_size: The size of the image to create in bytes
        :param subformat: The subformat to use, if any
        """
        # These tests depend on mkisofs being installed and in the path;
        # if it is not installed, skip.
        try:
            subprocess.check_output('mkisofs --version', shell=True)
        except Exception:
            self.skipTest('mkisofs not installed')

        size = image_size // units.Mi
        base_cmd = "mkisofs"
        if subformat == 'udf':
            # Depending on the distribution, mkisofs may not support udf
            # and may be provided by genisoimage instead. As a result we
            # need to check whether the command supports udf via its help
            # output instead of checking the installed version.
            # mkisofs --help writes to stderr, so we need to redirect it
            # to stdout to use grep.
            try:
                subprocess.check_output(
                    'mkisofs --help 2>&1 | grep udf', shell=True)
            except Exception:
                self.skipTest('mkisofs does not support udf format')
            base_cmd += " -udf"
        prefix = TEST_IMAGE_PREFIX
        prefix += '-%s-' % subformat
        fn = tempfile.mktemp(prefix=prefix, suffix='.iso')
        self._created_files.append(fn)
        subprocess.check_output(
            'dd if=/dev/zero of=%s bs=1M count=%i' % (fn, size),
            shell=True)
        subprocess.check_output(
            '%s -V "TEST" -o %s %s' % (base_cmd, fn, fn),
            shell=True)
        return fn

    def _generate_bad_iso(self):
        # We want to emulate a malicious user who uploads an ISO file
        # that has a qcow2 header in the system area of the ISO file.
        # We will create a qcow2 image and an ISO file, and then copy the
        # qcow2 header to the ISO file, e.g.:
        # mkisofs -o orig.iso /etc/resolv.conf
        # qemu-img create orig.qcow2 -f qcow2 64M
        # dd if=orig.qcow2 of=outcome bs=32K count=1
        # dd if=orig.iso of=outcome bs=32K skip=1 seek=1
        qcow = self._create_img('qcow2', 10 * units.Mi)
        iso = self._create_iso(64 * units.Mi, subformat='9660')

        # First ensure the files are valid
        iso_fmt = self._test_format_at_block_size('iso', iso, 4 * units.Ki)
        self.assertTrue(iso_fmt.format_match)
        qcow_fmt = self._test_format_at_block_size('qcow2', qcow,
                                                   4 * units.Ki)
        self.assertTrue(qcow_fmt.format_match)

        # Now copy the qcow2 header to an ISO file
        prefix = TEST_IMAGE_PREFIX
        prefix += '-bad-'
        fn = tempfile.mktemp(prefix=prefix, suffix='.iso')
        self._created_files.append(fn)
        subprocess.check_output(
            'dd if=%s of=%s bs=32K count=1' % (qcow, fn),
            shell=True)
        subprocess.check_output(
            'dd if=%s of=%s bs=32K skip=1 seek=1' % (iso, fn),
            shell=True)
        return qcow, iso, fn

    def _create_img(self, fmt, size, subformat=None, options=None,
                    backing_file=None):
        """Create an image file of the given format and size.
        :param fmt: The format to create
        :param size: The size of the image to create in bytes
        :param subformat: The subformat to use, if any
        :param options: A dictionary of options to pass to the format
        :param backing_file: The backing file to use, if any
        """
        if fmt == 'iso':
            return self._create_iso(size, subformat)

        # These tests depend on qemu-img being installed and in the path;
        # if it is not installed, skip.
        try:
            subprocess.check_output('qemu-img --version', shell=True)
        except Exception:
            self.skipTest('qemu-img not installed')

        if fmt == 'vhd':
            # QEMU calls the vhd format vpc
            fmt = 'vpc'

        if options is None:
            options = {}
        opt = ''
        prefix = TEST_IMAGE_PREFIX

        if subformat:
            options['subformat'] = subformat
            prefix += subformat + '-'

        if options:
            opt += '-o ' + ','.join('%s=%s' % (k, v)
                                    for k, v in options.items())

        if backing_file is not None:
            opt += ' -b %s -F raw' % backing_file

        fn = tempfile.mktemp(prefix=prefix, suffix='.%s' % fmt)
        self._created_files.append(fn)
        subprocess.check_output(
            'qemu-img create -f %s %s %s %i' % (fmt, opt, fn, size),
            shell=True)
        return fn

    def _create_allocated_vmdk(self, size_mb, subformat=None):
        # We need a "big" VMDK file to exercise some parts of the code of
        # the format_inspector. A way to create one is to first create an
        # empty file, and then to convert it with the -S 0 option.
        if subformat is None:
            # Matches qemu-img default, see `qemu-img convert -O vmdk -o help`
            subformat = 'monolithicSparse'

        prefix = TEST_IMAGE_PREFIX
        prefix += '-%s-' % subformat
        fn = tempfile.mktemp(prefix=prefix, suffix='.vmdk')
        self._created_files.append(fn)
        raw = tempfile.mktemp(prefix=prefix, suffix='.raw')
        self._created_files.append(raw)

        # Create a file with pseudo-random data, otherwise it will get
        # compressed in the streamOptimized format
        subprocess.check_output(
            'dd if=/dev/urandom of=%s bs=1M count=%i' % (raw, size_mb),
            shell=True)

        # Convert it to VMDK
        subprocess.check_output(
            'qemu-img convert -f raw -O vmdk -o subformat=%s -S 0 %s %s' % (
                subformat, raw, fn),
            shell=True)
        return fn

    def _test_format_at_block_size(self, format_name, img, block_size):
        fmt = format_inspector.get_inspector(format_name)()
        self.assertIsNotNone(fmt,
                             'Did not get format inspector for %s' % (
                                 format_name))
        wrapper = format_inspector.InfoWrapper(open(img, 'rb'), fmt)

        while True:
            chunk = wrapper.read(block_size)
            if not chunk:
                break

        wrapper.close()
        return fmt

    def _test_format_at_image_size(self, format_name, image_size,
                                   subformat=None):
        """Test the format inspector for the given format at the
        given image size.

        :param format_name: The format to test
        :param image_size: The size of the image to create in bytes
        :param subformat: The subformat to use, if any
        """
        img = self._create_img(format_name, image_size, subformat=subformat)

        # Some formats have internal alignment restrictions making this not
        # always exactly like image_size, so get the real value for comparison
        virtual_size = get_size_from_qemu_img(img)

        # Read the format in various sizes, some of which will read whole
        # sections in a single read, others will be completely unaligned, etc.
        block_sizes = [64 * units.Ki, 1 * units.Mi]
        # ISO images have a 32KiB system area at the beginning of the image;
        # as a result, reading that in 17- or 512-byte blocks takes too long,
        # causing the test to fail. The 64KiB block size is enough to read
        # the system area and header in a single read. The 1MiB block size
        # adds very little time to the test, so we include it.
        if format_name != 'iso':
            block_sizes.extend([17, 512])
        for block_size in block_sizes:
            fmt = self._test_format_at_block_size(format_name, img,
                                                  block_size)
            self.assertTrue(fmt.format_match,
                            'Failed to match %s at size %i block %i' % (
                                format_name, image_size, block_size))
            self.assertEqual(virtual_size, fmt.virtual_size,
                             ('Failed to calculate size for %s at size %i '
                              'block %i') % (format_name, image_size,
                                             block_size))
            memory = sum(fmt.context_info.values())
            self.assertLess(memory, 512 * units.Ki,
                            'Format used more than 512KiB of memory: %s' % (
                                fmt.context_info))

    def _test_format(self, format_name, subformat=None):
        # Try a few different image sizes, including some odd and very small
        # sizes
        for image_size in (512, 513, 2057, 7):
            self._test_format_at_image_size(format_name,
                                            image_size * units.Mi,
                                            subformat=subformat)

    def test_qcow2(self):
        self._test_format('qcow2')

    def test_iso_9660(self):
        self._test_format('iso', subformat='9660')

    def test_udf(self):
        self._test_format('iso', subformat='udf')

    def test_bad_iso_qcow2(self):
        _, _, fn = self._generate_bad_iso()

        iso_check = self._test_format_at_block_size('iso', fn, 4 * units.Ki)
        qcow_check = self._test_format_at_block_size('qcow2', fn,
                                                     4 * units.Ki)
        # The system area of the ISO file is not considered part of the
        # format, so even with a qcow2 header hidden there the ISO file
        # is still valid.
        self.assertTrue(iso_check.format_match)
        # The qcow2 header in the system area, however, will be parsed by
        # the qcow2 format inspector, and it will match as well.
        self.assertTrue(qcow_check.format_match)
        # If we call format_inspector.detect_file_format it should detect
        # and raise an exception because both match internally.
        e = self.assertRaises(
            format_inspector.ImageFormatError,
            format_inspector.detect_file_format, fn)
        self.assertIn('Multiple formats detected', str(e))

    def test_vhd(self):
        self._test_format('vhd')

    def test_vhdx(self):
        self._test_format('vhdx')

    def test_vmdk(self):
        self._test_format('vmdk')

    def test_vmdk_stream_optimized(self):
        self._test_format('vmdk', 'streamOptimized')

    def test_from_file_reads_minimum(self):
        img = self._create_img('qcow2', 10 * units.Mi)
        file_size = os.stat(img).st_size
        fmt = format_inspector.QcowInspector.from_file(img)
        # We know everything we need from the first 512 bytes of a QCOW
        # image, so make sure that we did not read the whole thing when we
        # inspect a local file.
self.assertLess(fmt.actual_size, file_size) def qed_supported(self): output = subprocess.check_output('qemu-img create --help', shell=True) return b' qed ' in output def test_qed_always_unsafe(self): if not self.qed_supported(): raise self.skipException('qed format not supported by qemu-img') img = self._create_img('qed', 10 * units.Mi) fmt = format_inspector.get_inspector('qed').from_file(img) self.assertTrue(fmt.format_match) self.assertFalse(fmt.safety_check()) def _test_vmdk_bad_descriptor_offset(self, subformat=None): format_name = 'vmdk' image_size = 10 * units.Mi descriptorOffsetAddr = 0x1c BAD_ADDRESS = 0x400 img = self._create_img(format_name, image_size, subformat=subformat) # Corrupt the header fd = open(img, 'r+b') fd.seek(descriptorOffsetAddr) fd.write(struct.pack('= max_iterations: return def _test_reader_chunked(self, chunk_size, read_size, max_iterations=5): generator = self._create_generator(chunk_size, max_iterations) reader = utils.CooperativeReader(generator) result = bytearray() while True: data = reader.read(read_size) if len(data) == 0: break self.assertLessEqual(len(data), read_size) result += data expected = (b'a' * chunk_size + b'b' * chunk_size + b'c' * chunk_size + b'a' * chunk_size + b'b' * chunk_size) self.assertEqual(expected, bytes(result)) def test_cooperative_reader_preserves_size_chunk_less_then_read(self): self._test_reader_chunked(43, 101) def test_cooperative_reader_preserves_size_chunk_equals_read(self): self._test_reader_chunked(1024, 1024) def test_cooperative_reader_preserves_size_chunk_more_then_read(self): chunk_size = 16 * 1024 * 1024 # 16 Mb, as in remote http source read_size = 8 * 1024 # 8k, as in httplib self._test_reader_chunked(chunk_size, read_size) def test_limiting_reader(self): """Ensure limiting reader class accesses all bytes of file""" BYTES = 1024 bytes_read = 0 data = io.StringIO("*" * BYTES) for chunk in utils.LimitingReader(data, BYTES): bytes_read += len(chunk) self.assertEqual(BYTES, bytes_read) bytes_read = 0 data = io.StringIO("*" * BYTES) reader = utils.LimitingReader(data, BYTES) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertEqual(BYTES, bytes_read) def test_limiting_reader_fails(self): """Ensure limiting reader class throws exceptions if limit exceeded""" BYTES = 1024 def _consume_all_iter(): bytes_read = 0 data = io.StringIO("*" * BYTES) for chunk in utils.LimitingReader(data, BYTES - 1): bytes_read += len(chunk) self.assertRaises(exception.ImageSizeLimitExceeded, _consume_all_iter) def _consume_all_read(): bytes_read = 0 data = io.StringIO("*" * BYTES) reader = utils.LimitingReader(data, BYTES - 1) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertRaises(exception.ImageSizeLimitExceeded, _consume_all_read) def test_get_meta_from_headers(self): resp = webob.Response() resp.headers = {"x-image-meta-name": 'test', 'x-image-meta-virtual-size': 80} result = utils.get_image_meta_from_headers(resp) self.assertEqual({'name': 'test', 'properties': {}, 'virtual_size': 80}, result) def test_get_meta_from_headers_none_virtual_size(self): resp = webob.Response() resp.headers = {"x-image-meta-name": 'test', 'x-image-meta-virtual-size': 'None'} result = utils.get_image_meta_from_headers(resp) self.assertEqual({'name': 'test', 'properties': {}, 'virtual_size': None}, result) def test_get_meta_from_headers_bad_headers(self): resp = webob.Response() resp.headers = {"x-image-meta-bad": 'test'} self.assertRaises(webob.exc.HTTPBadRequest, 
utils.get_image_meta_from_headers, resp) resp.headers = {"x-image-meta-": 'test'} self.assertRaises(webob.exc.HTTPBadRequest, utils.get_image_meta_from_headers, resp) resp.headers = {"x-image-meta-*": 'test'} self.assertRaises(webob.exc.HTTPBadRequest, utils.get_image_meta_from_headers, resp) def test_image_meta(self): image_meta = {'x-image-meta-size': 'test'} image_meta_properties = {'properties': {'test': "test"}} actual = utils.image_meta_to_http_headers(image_meta) actual_test2 = utils.image_meta_to_http_headers( image_meta_properties) self.assertEqual({'x-image-meta-x-image-meta-size': 'test'}, actual) self.assertEqual({'x-image-meta-property-test': 'test'}, actual_test2) def test_create_mashup_dict_with_different_core_custom_properties(self): image_meta = { 'id': 'test-123', 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': True, 'updated_at': '', 'properties': {'test_key': 'test_1234'}, } mashup_dict = utils.create_mashup_dict(image_meta) self.assertNotIn('properties', mashup_dict) self.assertEqual(image_meta['properties']['test_key'], mashup_dict['test_key']) def test_create_mashup_dict_with_same_core_custom_properties(self): image_meta = { 'id': 'test-123', 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': True, 'updated_at': '', 'properties': {'min_ram': '2048M'}, } mashup_dict = utils.create_mashup_dict(image_meta) self.assertNotIn('properties', mashup_dict) self.assertNotEqual(image_meta['properties']['min_ram'], mashup_dict['min_ram']) self.assertEqual(image_meta['min_ram'], mashup_dict['min_ram']) def test_mutating(self): class FakeContext(object): def __init__(self): self.read_only = False class Fake(object): def __init__(self): self.context = FakeContext() def fake_function(req, context): return 'test passed' req = webob.Request.blank('/some_request') result = utils.mutating(fake_function) self.assertEqual("test passed", result(req, Fake())) def test_valid_hostname(self): valid_inputs = ['localhost', 'glance04-a' 'G', '528491'] for input_str in valid_inputs: self.assertTrue(utils.is_valid_hostname(input_str)) def test_valid_hostname_fail(self): invalid_inputs = ['localhost.localdomain', '192.168.0.1', '\u2603', 'glance02.stack42.local'] for input_str in invalid_inputs: self.assertFalse(utils.is_valid_hostname(input_str)) def test_valid_fqdn(self): valid_inputs = ['localhost.localdomain', 'glance02.stack42.local' 'glance04-a.stack47.local', 'img83.glance.xn--penstack-r74e.org'] for input_str in valid_inputs: self.assertTrue(utils.is_valid_fqdn(input_str)) def test_valid_fqdn_fail(self): invalid_inputs = ['localhost', '192.168.0.1', '999.88.77.6', '\u2603.local', 'glance02.stack42'] for input_str in invalid_inputs: self.assertFalse(utils.is_valid_fqdn(input_str)) def test_valid_host_port_string(self): valid_pairs = ['10.11.12.13:80', '172.17.17.1:65535', '[fe80::a:b:c:d]:9990', 'localhost:9990', 'localhost.localdomain:9990', 'glance02.stack42.local:1234', 'glance04-a.stack47.local:1234', 'img83.glance.xn--penstack-r74e.org:13080'] for pair_str in valid_pairs: host, port = 
utils.parse_valid_host_port(pair_str) escaped = pair_str.startswith('[') expected_host = '%s%s%s' % ('[' if escaped else '', host, ']' if escaped else '') self.assertTrue(pair_str.startswith(expected_host)) self.assertGreater(port, 0) expected_pair = '%s:%d' % (expected_host, port) self.assertEqual(expected_pair, pair_str) def test_valid_host_port_string_fail(self): invalid_pairs = ['', '10.11.12.13', '172.17.17.1:99999', '290.12.52.80:5673', 'absurd inputs happen', '\u2601', '\u2603:8080', 'fe80::1', '[fe80::2]', ':5673', '[fe80::a:b:c:d]9990', 'fe80:a:b:c:d:e:f:1:2:3:4', 'fe80:a:b:c:d:e:f:g', 'fe80::1:8080', '[fe80:a:b:c:d:e:f:g]:9090', '[a:b:s:u:r:d]:fe80'] for pair in invalid_pairs: self.assertRaises(ValueError, utils.parse_valid_host_port, pair) def test_get_stores_from_request_returns_default(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") req = webob.Request.blank('/some_request') mp = "glance.common.utils.glance_store.get_store_from_store_identifier" with mock.patch(mp) as mock_get_store: result = utils.get_stores_from_request(req, {}) self.assertEqual(["ceph1"], result) mock_get_store.assert_called_once_with("ceph1") def test_get_stores_from_request_returns_stores_from_body(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") body = {"stores": ["ceph1", "ceph2"]} req = webob.Request.blank("/some_request") mp = "glance.common.utils.glance_store.get_store_from_store_identifier" with mock.patch(mp) as mock_get_store: result = utils.get_stores_from_request(req, body) self.assertEqual(["ceph1", "ceph2"], result) mock_get_store.assert_any_call("ceph1") mock_get_store.assert_any_call("ceph2") self.assertEqual(mock_get_store.call_count, 2) def test_get_stores_from_request_returns_store_from_headers(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") headers = {"x-image-meta-store": "ceph2"} req = webob.Request.blank("/some_request", headers=headers) mp = "glance.common.utils.glance_store.get_store_from_store_identifier" with mock.patch(mp) as mock_get_store: result = utils.get_stores_from_request(req, {}) self.assertEqual(["ceph2"], result) mock_get_store.assert_called_once_with("ceph2") def test_get_stores_from_request_raises_bad_request(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") headers = {"x-image-meta-store": "ceph2"} body = {"stores": ["ceph1"]} req = webob.Request.blank("/some_request", headers=headers) self.assertRaises(webob.exc.HTTPBadRequest, utils.get_stores_from_request, req, body) def test_get_stores_from_request_returns_all_stores(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } reserved_stores = { 'os_glance_staging_store': 'file', 'os_glance_tasks_store': 'file' } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF, reserved_stores=reserved_stores) self.config(default_backend="ceph1", group="glance_store") body = {"all_stores": True} req = webob.Request.blank("/some_request") mp = "glance.common.utils.glance_store.get_store_from_store_identifier" 
with mock.patch(mp) as mock_get_store: result = sorted(utils.get_stores_from_request(req, body)) self.assertEqual(["ceph1", "ceph2"], result) mock_get_store.assert_any_call("ceph1") mock_get_store.assert_any_call("ceph2") self.assertEqual(mock_get_store.call_count, 2) self.assertNotIn('os_glance_staging_store', result) self.assertNotIn('os_glance_tasks_store', result) def test_get_stores_from_request_excludes_reserved_stores(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") body = {"all_stores": True} req = webob.Request.blank("/some_request") mp = "glance.common.utils.glance_store.get_store_from_store_identifier" with mock.patch(mp) as mock_get_store: result = sorted(utils.get_stores_from_request(req, body)) self.assertEqual(["ceph1", "ceph2"], result) mock_get_store.assert_any_call("ceph1") mock_get_store.assert_any_call("ceph2") self.assertEqual(mock_get_store.call_count, 2) def test_get_stores_from_request_excludes_readonly_store(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd", "http": "http" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") body = {"all_stores": True} req = webob.Request.blank("/some_request") mp = "glance.common.utils.glance_store.get_store_from_store_identifier" with mock.patch(mp) as mock_get_store: result = sorted(utils.get_stores_from_request(req, body)) self.assertNotIn("http", result) self.assertEqual(["ceph1", "ceph2"], result) mock_get_store.assert_any_call("ceph1") mock_get_store.assert_any_call("ceph2") self.assertEqual(mock_get_store.call_count, 2) def test_get_stores_from_request_raises_bad_request_with_all_stores(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") headers = {"x-image-meta-store": "ceph2"} body = {"stores": ["ceph1"], "all_stores": True} req = webob.Request.blank("/some_request", headers=headers) self.assertRaises(webob.exc.HTTPBadRequest, utils.get_stores_from_request, req, body) def test_single_store_http_enabled_and_http_not_in_url(self): self.config(stores="http,file", group="glance_store") loc_url = "rbd://aaaaaaaa/images/id" self.assertFalse(utils.is_http_store_configured(loc_url)) def test_single_store_http_disabled_and_http_in_url(self): self.config(stores="rbd,file", group="glance_store") loc_url = BASE_URI self.assertFalse(utils.is_http_store_configured(loc_url)) def test_single_store_http_enabled_and_http_in_url(self): self.config(stores="http,file", group="glance_store") loc_url = BASE_URI self.assertTrue(utils.is_http_store_configured(loc_url)) def test_multiple_store_http_enabled_and_http_not_in_url(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd", "http": "http" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="http", group="glance_store") loc_url = "rbd://aaaaaaaa/images/id" self.assertFalse(utils.is_http_store_configured(loc_url)) def test_multiple_store_http_disabled_and_http_in_url(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd", } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="ceph1", group="glance_store") loc_url = BASE_URI self.assertFalse(utils.is_http_store_configured(loc_url)) def 
test_multiple_store_http_enabled_and_http_in_url(self): enabled_backends = { "ceph1": "rbd", "ceph2": "rbd", "http": "http" } self.config(enabled_backends=enabled_backends) store.register_store_opts(CONF) self.config(default_backend="http", group="glance_store") loc_url = BASE_URI self.assertTrue(utils.is_http_store_configured(loc_url)) class SplitFilterOpTestCase(test_utils.BaseTestCase): def test_less_than_operator(self): expr = 'lt:bar' returned = utils.split_filter_op(expr) self.assertEqual(('lt', 'bar'), returned) def test_less_than_equal_operator(self): expr = 'lte:bar' returned = utils.split_filter_op(expr) self.assertEqual(('lte', 'bar'), returned) def test_greater_than_operator(self): expr = 'gt:bar' returned = utils.split_filter_op(expr) self.assertEqual(('gt', 'bar'), returned) def test_greater_than_equal_operator(self): expr = 'gte:bar' returned = utils.split_filter_op(expr) self.assertEqual(('gte', 'bar'), returned) def test_not_equal_operator(self): expr = 'neq:bar' returned = utils.split_filter_op(expr) self.assertEqual(('neq', 'bar'), returned) def test_equal_operator(self): expr = 'eq:bar' returned = utils.split_filter_op(expr) self.assertEqual(('eq', 'bar'), returned) def test_in_operator(self): expr = 'in:bar' returned = utils.split_filter_op(expr) self.assertEqual(('in', 'bar'), returned) def test_split_filter_value_for_quotes(self): expr = '\"fake\\\"name\",fakename,\"fake,name\"' returned = utils.split_filter_value_for_quotes(expr) list_values = ['fake\\"name', 'fakename', 'fake,name'] self.assertEqual(list_values, returned) def test_validate_quotes(self): expr = '\"aaa\\\"aa\",bb,\"cc\"' returned = utils.validate_quotes(expr) self.assertIsNone(returned) invalid_expr = ['\"aa', 'ss\"', 'aa\"bb\"cc', '\"aa\"\"bb\"'] for expr in invalid_expr: self.assertRaises(exception.InvalidParameterValue, utils.validate_quotes, expr) def test_default_operator(self): expr = 'bar' returned = utils.split_filter_op(expr) self.assertEqual(('eq', expr), returned) def test_default_operator_with_datetime(self): expr = '2015-08-27T09:49:58Z' returned = utils.split_filter_op(expr) self.assertEqual(('eq', expr), returned) def test_operator_with_datetime(self): expr = 'lt:2015-08-27T09:49:58Z' returned = utils.split_filter_op(expr) self.assertEqual(('lt', '2015-08-27T09:49:58Z'), returned) class EvaluateFilterOpTestCase(test_utils.BaseTestCase): def test_less_than_operator(self): self.assertTrue(utils.evaluate_filter_op(9, 'lt', 10)) self.assertFalse(utils.evaluate_filter_op(10, 'lt', 10)) self.assertFalse(utils.evaluate_filter_op(11, 'lt', 10)) def test_less_than_equal_operator(self): self.assertTrue(utils.evaluate_filter_op(9, 'lte', 10)) self.assertTrue(utils.evaluate_filter_op(10, 'lte', 10)) self.assertFalse(utils.evaluate_filter_op(11, 'lte', 10)) def test_greater_than_operator(self): self.assertFalse(utils.evaluate_filter_op(9, 'gt', 10)) self.assertFalse(utils.evaluate_filter_op(10, 'gt', 10)) self.assertTrue(utils.evaluate_filter_op(11, 'gt', 10)) def test_greater_than_equal_operator(self): self.assertFalse(utils.evaluate_filter_op(9, 'gte', 10)) self.assertTrue(utils.evaluate_filter_op(10, 'gte', 10)) self.assertTrue(utils.evaluate_filter_op(11, 'gte', 10)) def test_not_equal_operator(self): self.assertTrue(utils.evaluate_filter_op(9, 'neq', 10)) self.assertFalse(utils.evaluate_filter_op(10, 'neq', 10)) self.assertTrue(utils.evaluate_filter_op(11, 'neq', 10)) def test_equal_operator(self): self.assertFalse(utils.evaluate_filter_op(9, 'eq', 10)) 
self.assertTrue(utils.evaluate_filter_op(10, 'eq', 10)) self.assertFalse(utils.evaluate_filter_op(11, 'eq', 10)) def test_invalid_operator(self): self.assertRaises(exception.InvalidFilterOperatorValue, utils.evaluate_filter_op, '10', 'bar', '8') class ImportURITestCase(test_utils.BaseTestCase): def test_validate_import_uri(self): self.assertTrue(utils.validate_import_uri("http://foo.com")) self.config(allowed_schemes=['http'], group='import_filtering_opts') self.config(allowed_hosts=['example.com'], group='import_filtering_opts') self.assertTrue(utils.validate_import_uri("http://example.com")) self.config(allowed_ports=['8080'], group='import_filtering_opts') self.assertTrue(utils.validate_import_uri("http://example.com:8080")) def test_invalid_import_uri(self): self.assertFalse(utils.validate_import_uri("")) self.assertFalse(utils.validate_import_uri("fake_uri")) self.config(disallowed_schemes=['ftp'], group='import_filtering_opts') self.assertFalse(utils.validate_import_uri("ftp://example.com")) self.config(disallowed_hosts=['foo.com'], group='import_filtering_opts') self.assertFalse(utils.validate_import_uri("ftp://foo.com")) self.config(disallowed_ports=['8484'], group='import_filtering_opts') self.assertFalse(utils.validate_import_uri("http://localhost:8484")) def test_ignored_filtering_options(self): LOG = logging.getLogger('glance.common.utils') with mock.patch.object(LOG, 'debug') as mock_run: self.config(allowed_schemes=['https', 'ftp'], group='import_filtering_opts') self.config(disallowed_schemes=['ftp'], group='import_filtering_opts') self.assertTrue(utils.validate_import_uri("ftp://foo.com")) mock_run.assert_called_once() with mock.patch.object(LOG, 'debug') as mock_run: self.config(allowed_schemes=[], group='import_filtering_opts') self.config(disallowed_schemes=[], group='import_filtering_opts') self.config(allowed_hosts=['example.com', 'foo.com'], group='import_filtering_opts') self.config(disallowed_hosts=['foo.com'], group='import_filtering_opts') self.assertTrue(utils.validate_import_uri("ftp://foo.com")) mock_run.assert_called_once() with mock.patch.object(LOG, 'debug') as mock_run: self.config(allowed_hosts=[], group='import_filtering_opts') self.config(disallowed_hosts=[], group='import_filtering_opts') self.config(allowed_ports=[8080, 8484], group='import_filtering_opts') self.config(disallowed_ports=[8484], group='import_filtering_opts') self.assertTrue(utils.validate_import_uri("ftp://foo.com:8484")) mock_run.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/test_wsgi.py0000664000175000017500000010203500000000000022303 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
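# NOTE: SplitFilterOpTestCase above fixes the contract of
# glance.common.utils.split_filter_op: split on the first colon, and only
# treat the prefix as an operator when it is a recognized one, so values
# that themselves contain colons (e.g. ISO 8601 timestamps) fall back to
# the default 'eq' operator. A standalone sketch of that contract (the
# function name is illustrative; the real helper lives in
# glance.common.utils):
def split_filter_op_sketch(expression):
    left, sep, right = expression.partition(':')
    if sep and left in ('lt', 'lte', 'gt', 'gte', 'neq', 'eq', 'in'):
        return left, right
    # No recognized operator prefix: compare the whole expression for
    # equality, as the default-operator tests expect.
    return 'eq', expression


assert split_filter_op_sketch('lt:bar') == ('lt', 'bar')
assert split_filter_op_sketch('2015-08-27T09:49:58Z') == \
    ('eq', '2015-08-27T09:49:58Z')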
import datetime import gettext import http.client as http import os import socket from unittest import mock import eventlet.patcher import fixtures from oslo_concurrency import processutils from oslo_serialization import jsonutils import routes import webob from glance.api.v2 import router as router_v2 from glance.common import exception from glance.common import utils from glance.common import wsgi from glance import i18n from glance.image_cache import prefetcher from glance.tests import utils as test_utils class RequestTest(test_utils.BaseTestCase): def _set_expected_languages(self, all_locales=None, avail_locales=None): if all_locales is None: all_locales = [] # Override gettext.find to return other than None for some languages. def fake_gettext_find(lang_id, *args, **kwargs): found_ret = '/glance/%s/LC_MESSAGES/glance.mo' % lang_id if avail_locales is None: # All locales are available. return found_ret languages = kwargs['languages'] if languages[0] in avail_locales: return found_ret return None self.mock_object(gettext, 'find', fake_gettext_find) def test_content_range(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Range"] = 'bytes 10-99/*' range_ = request.get_range_from_request(120) self.assertEqual(10, range_.start) self.assertEqual(100, range_.stop) # non-inclusive self.assertIsNone(range_.length) def test_content_range_invalid(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Range"] = 'bytes=0-99' self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable, request.get_range_from_request, 120) def test_range(self): request = wsgi.Request.blank('/tests/123') request.headers["Range"] = 'bytes=10-99' range_ = request.get_range_from_request(120) self.assertEqual(10, range_.start) self.assertEqual(100, range_.end) # non-inclusive def test_range_invalid(self): request = wsgi.Request.blank('/tests/123') request.headers["Range"] = 'bytes=150-' self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable, request.get_range_from_request, 120) def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123') self.assertRaises(exception.InvalidContentType, request.get_content_type, ('application/xml',)) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "text/html" self.assertRaises(exception.InvalidContentType, request.get_content_type, ('application/xml',)) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type(('application/json',)) self.assertEqual("application/json", result) def test_params(self): expected = webob.multidict.NestedMultiDict({ 'limit': '20', 'name': 'Привет', 'sort_key': 'name', 'sort_dir': 'asc'}) request = wsgi.Request.blank("/?limit=20&name=%D0%9F%D1%80%D0%B8" "%D0%B2%D0%B5%D1%82&sort_key=name" "&sort_dir=asc") actual = request.params self.assertEqual(expected, actual) def test_content_type_from_accept_xml(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept_json(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept_xml_json(self): request = wsgi.Request.blank('/tests/123') 
request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept_json_xml_quality(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_language_accept_default(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8" result = request.best_match_language() self.assertIsNone(result) def test_language_accept_none(self): request = wsgi.Request.blank('/tests/123') result = request.best_match_language() self.assertIsNone(result) def test_best_match_language_expected(self): # If Accept-Language is a supported language, best_match_language() # returns it. self._set_expected_languages(all_locales=['it']) req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'}) self.assertEqual('it', req.best_match_language()) def test_request_match_language_unexpected(self): # If Accept-Language is a language we do not support, # best_match_language() returns None. self._set_expected_languages(all_locales=['it']) req = wsgi.Request.blank('/', headers={'Accept-Language': 'unknown'}) self.assertIsNone(req.best_match_language()) @mock.patch.object(webob.acceptparse.AcceptLanguageValidHeader, 'lookup') def test_best_match_language_unknown(self, mock_lookup): # Test that we are actually invoking language negotiation by WebOb request = wsgi.Request.blank('/') accepted = 'unknown-lang' request.headers = {'Accept-Language': accepted} # Bug #1765748: see comment in code in the function under test # to understand why this is the correct return value for the # webob 1.8.x mock mock_lookup.return_value = 'fake_LANG' self.assertIsNone(request.best_match_language()) mock_lookup.assert_called_once() # If Accept-Language is missing or empty, match should be None request.headers = {'Accept-Language': ''} self.assertIsNone(request.best_match_language()) request.headers.pop('Accept-Language') self.assertIsNone(request.best_match_language()) def test_http_error_response_codes(self): sample_id, member_id, tag_val, task_id = 'abc', '123', '1', '2' """Makes sure v2 unallowed methods return 405""" unallowed_methods = [ ('/schemas/image', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/images', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/member', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/members', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/task', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/tasks', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/images', ['PUT', 'DELETE', 'PATCH', 'HEAD']), ('/images/%s' % sample_id, ['POST', 'PUT', 'HEAD']), ('/images/%s/file' % sample_id, ['POST', 'DELETE', 'PATCH', 'HEAD']), ('/images/%s/tags/%s' % (sample_id, tag_val), ['GET', 'POST', 'PATCH', 'HEAD']), ('/images/%s/members' % sample_id, ['PUT', 'DELETE', 'PATCH', 'HEAD']), ('/images/%s/members/%s' % (sample_id, member_id), ['POST', 'PATCH', 'HEAD']), ('/tasks', ['PUT', 'DELETE', 'PATCH', 'HEAD']), ('/tasks/%s' % task_id, ['POST', 'PUT', 'PATCH', 'HEAD']), ] api = 
test_utils.FakeAuthMiddleware(router_v2.API(routes.Mapper())) for uri, methods in unallowed_methods: for method in methods: req = webob.Request.blank(uri) req.method = method res = req.get_response(api) self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) # Makes sure not implemented methods return 405 req = webob.Request.blank('/schemas/image') req.method = 'NonexistentMethod' res = req.get_response(api) self.assertEqual(http.METHOD_NOT_ALLOWED, res.status_int) class ResourceTest(test_utils.BaseTestCase): def test_get_action_args(self): env = { 'wsgiorg.routing_args': [ None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12, }, ], } expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_invalid_index(self): env = {'wsgiorg.routing_args': []} expected = {} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_del_controller_error(self): actions = {'format': None, 'action': 'update', 'id': 12} env = {'wsgiorg.routing_args': [None, actions]} expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_del_format_error(self): actions = {'action': 'update', 'id': 12} env = {'wsgiorg.routing_args': [None, actions]} expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_dispatch(self): class Controller(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) actual = resource.dispatch(Controller(), 'index', 'on', pants='off') expected = ('on', 'off') self.assertEqual(expected, actual) def test_dispatch_default(self): class Controller(object): def default(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) actual = resource.dispatch(Controller(), 'index', 'on', pants='off') expected = ('on', 'off') self.assertEqual(expected, actual) def test_dispatch_no_default(self): class Controller(object): def show(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) self.assertRaises(AttributeError, resource.dispatch, Controller(), 'index', 'on', pants='off') def test_dispatch_raises_bad_request(self): class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(FakeController(), None, None) def dispatch(self, obj, action, *args, **kwargs): raise exception.InvalidPropertyProtectionConfiguration() self.mock_object(wsgi.Resource, 'dispatch', dispatch) request = wsgi.Request.blank('/') self.assertRaises(webob.exc.HTTPBadRequest, resource.__call__, request) def test_call(self): class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(FakeController(), None, None) def dispatch(self, obj, action, *args, **kwargs): if isinstance(obj, wsgi.JSONRequestDeserializer): return [] if isinstance(obj, wsgi.JSONResponseSerializer): raise webob.exc.HTTPForbidden() self.mock_object(wsgi.Resource, 'dispatch', dispatch) request = wsgi.Request.blank('/') response = resource.__call__(request) self.assertIsInstance(response, webob.exc.HTTPForbidden) self.assertEqual(http.FORBIDDEN, response.status_code) def test_call_raises_exception(self): class FakeController(object): def index(self, shirt, pants=None): 
return (shirt, pants) resource = wsgi.Resource(FakeController(), None, None) def dispatch(self, obj, action, *args, **kwargs): raise Exception("test exception") self.mock_object(wsgi.Resource, 'dispatch', dispatch) request = wsgi.Request.blank('/') response = resource.__call__(request) self.assertIsInstance(response, webob.exc.HTTPInternalServerError) self.assertEqual(http.INTERNAL_SERVER_ERROR, response.status_code) @mock.patch.object(wsgi, 'translate_exception') def test_resource_call_error_handle_localized(self, mock_translate_exception): class Controller(object): def delete(self, req, identity): raise webob.exc.HTTPBadRequest(explanation='Not Found') actions = {'action': 'delete', 'identity': 12} env = {'wsgiorg.routing_args': [None, actions]} request = wsgi.Request.blank('/tests/123', environ=env) message_es = 'No Encontrado' resource = wsgi.Resource(Controller(), wsgi.JSONRequestDeserializer(), None) translated_exc = webob.exc.HTTPBadRequest(message_es) mock_translate_exception.return_value = translated_exc e = self.assertRaises(webob.exc.HTTPBadRequest, resource, request) self.assertEqual(message_es, str(e)) @mock.patch.object(webob.acceptparse.AcceptLanguageValidHeader, 'lookup') @mock.patch.object(i18n, 'translate') def test_translate_exception(self, mock_translate, mock_lookup): mock_translate.return_value = 'No Encontrado' mock_lookup.return_value = 'de' req = wsgi.Request.blank('/tests/123') req.headers["Accept-Language"] = "de" e = webob.exc.HTTPNotFound(explanation='Not Found') e = wsgi.translate_exception(req, e) self.assertEqual('No Encontrado', e.explanation) def test_response_headers_encoded(self): # prepare environment for_openstack_comrades = ( '\u0417\u0430 \u043e\u043f\u0435\u043d\u0441\u0442\u0435\u043a, ' '\u0442\u043e\u0432\u0430\u0440\u0438\u0449\u0438') class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) class FakeSerializer(object): def index(self, response, result): response.headers['unicode_test'] = for_openstack_comrades # make request resource = wsgi.Resource(FakeController(), None, FakeSerializer()) actions = {'action': 'index'} env = {'wsgiorg.routing_args': [None, actions]} request = wsgi.Request.blank('/tests/123', environ=env) response = resource.__call__(request) # ensure it has been encoded correctly value = response.headers['unicode_test'] self.assertEqual(for_openstack_comrades, value) class JSONResponseSerializerTest(test_utils.BaseTestCase): def test_to_json(self): fixture = {"key": "value"} expected = b'{"key": "value"}' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_date_format_value(self): fixture = {"date": datetime.datetime(1901, 3, 8, 2)} expected = b'{"date": "1901-03-08T02:00:00.000000"}' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_more_deep_format(self): fixture = {"is_public": True, "name": [{"name1": "test"}]} expected = {"is_public": True, "name": [{"name1": "test"}]} actual = wsgi.JSONResponseSerializer().to_json(fixture) actual = jsonutils.loads(actual) for k in expected: self.assertEqual(expected[k], actual[k]) def test_to_json_with_set(self): fixture = set(["foo"]) expected = b'["foo"]' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_default(self): fixture = {"key": "value"} response = webob.Response() wsgi.JSONResponseSerializer().default(response, fixture) self.assertEqual(http.OK, response.status_int) 
content_types = [h for h in response.headerlist if h[0] == 'Content-Type'] self.assertEqual(1, len(content_types)) self.assertEqual('application/json', response.content_type) self.assertEqual(b'{"key": "value"}', response.body) class JSONRequestDeserializerTest(test_utils.BaseTestCase): def test_has_body_no_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers.pop('Content-Length') self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_has_body_zero_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers['Content-Length'] = 0 self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_has_body_has_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' self.assertIn('Content-Length', request.headers) self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request)) def test_no_body_no_content_length(self): request = wsgi.Request.blank('/') self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_from_json(self): fixture = '{"key": "value"}' expected = {"key": "value"} actual = wsgi.JSONRequestDeserializer().from_json(fixture) self.assertEqual(expected, actual) def test_from_json_malformed(self): fixture = 'kjasdklfjsklajf' self.assertRaises(webob.exc.HTTPBadRequest, wsgi.JSONRequestDeserializer().from_json, fixture) def test_default_no_body(self): request = wsgi.Request.blank('/') actual = wsgi.JSONRequestDeserializer().default(request) expected = {} self.assertEqual(expected, actual) def test_default_with_body(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'{"key": "value"}' actual = wsgi.JSONRequestDeserializer().default(request) expected = {"body": {"key": "value"}} self.assertEqual(expected, actual) def test_has_body_has_transfer_encoding(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked')) def test_has_body_multiple_transfer_encoding(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked, gzip')) def test_has_body_invalid_transfer_encoding(self): self.assertFalse(self._check_transfer_encoding( transfer_encoding='invalid', content_length=0)) def test_has_body_invalid_transfer_encoding_no_content_len_and_body(self): self.assertFalse(self._check_transfer_encoding( transfer_encoding='invalid', include_body=False)) def test_has_body_invalid_transfer_encoding_no_content_len_but_body(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='invalid', include_body=True)) def test_has_body_invalid_transfer_encoding_with_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='invalid', content_length=5)) def test_has_body_valid_transfer_encoding_with_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked', content_length=1)) def test_has_body_valid_transfer_encoding_without_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked')) def _check_transfer_encoding(self, transfer_encoding=None, content_length=None, include_body=True): request = wsgi.Request.blank('/') request.method = 'POST' if include_body: request.body = b'fake_body' request.headers['transfer-encoding'] = transfer_encoding if content_length is not None: request.headers['content-length'] = content_length return wsgi.JSONRequestDeserializer().has_body(request) def 
test_get_bind_addr_default_value(self): expected = ('0.0.0.0', '123456') actual = wsgi.get_bind_addr(default_port="123456") self.assertEqual(expected, actual) class ServerTest(test_utils.BaseTestCase): @mock.patch.object(prefetcher, 'Prefetcher') def test_create_pool(self, mock_prefetcher): """Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool.""" actual = wsgi.Server(threads=1).create_pool() self.assertIsInstance(actual, eventlet.greenpool.GreenPool) @mock.patch.object(prefetcher, 'Prefetcher') @mock.patch.object(wsgi.Server, 'configure_socket') def test_reserved_stores_not_allowed(self, mock_configure_socket, mock_prefetcher): """Ensure the reserved stores are not allowed""" enabled_backends = {'os_glance_file_store': 'file'} self.config(enabled_backends=enabled_backends) server = wsgi.Server(threads=1, initialize_glance_store=True) self.assertRaises(RuntimeError, server.configure) @mock.patch.object(prefetcher, 'Prefetcher') @mock.patch.object(wsgi.Server, 'configure_socket') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_http_keepalive(self, mock_migrate_db, mock_configure_socket, mock_prefetcher): mock_migrate_db.return_value = False self.config(http_keepalive=False) self.config(workers=0) server = wsgi.Server(threads=1) server.sock = 'fake_socket' # mocking eventlet.wsgi server method to check it is called with # configured 'http_keepalive' value. with mock.patch.object(eventlet.wsgi, 'server') as mock_server: fake_application = "fake-application" server.start(fake_application, 0) server.wait() mock_server.assert_called_once_with('fake_socket', fake_application, log=server._logger, debug=False, custom_pool=server.pool, keepalive=False, socket_timeout=900) @mock.patch.object(prefetcher, 'Prefetcher') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_number_of_workers_posix(self, mock_migrate_db, mock_prefetcher): """Ensure the number of workers matches num cpus limited to 8.""" mock_migrate_db.return_value = False if os.name == 'nt': raise self.skipException("Unsupported platform.") def pid(): i = 1 while True: i = i + 1 yield i with mock.patch.object(os, 'fork') as mock_fork: with mock.patch('oslo_concurrency.processutils.get_worker_count', return_value=4): mock_fork.side_effect = pid server = wsgi.Server() server.configure = mock.Mock() fake_application = "fake-application" server.start(fake_application, None) self.assertEqual(4, len(server.children)) with mock.patch('oslo_concurrency.processutils.get_worker_count', return_value=24): mock_fork.side_effect = pid server = wsgi.Server() server.configure = mock.Mock() fake_application = "fake-application" server.start(fake_application, None) self.assertEqual(8, len(server.children)) mock_fork.side_effect = pid server = wsgi.Server() server.configure = mock.Mock() fake_application = "fake-application" server.start(fake_application, None) cpus = processutils.get_worker_count() expected_workers = cpus if cpus < 8 else 8 self.assertEqual(expected_workers, len(server.children)) @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_invalid_staging_uri(self, mock_migrate_db): mock_migrate_db.return_value = False self.config(node_staging_uri='http://good.luck') server = wsgi.Server() with mock.patch.object(server, 'start_wsgi'): # Make sure a staging URI with a bad scheme will abort startup self.assertRaises(exception.GlanceException, server.start, 'fake-application', 34567) @mock.patch('os.path.exists')
@mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_missing_staging_dir(self, mock_migrate_db, mock_exists): mock_migrate_db.return_value = False mock_exists.return_value = False server = wsgi.Server() with mock.patch.object(server, 'start_wsgi'): # Since we are mocking out start_wsgi, create a fake pool ourselves server.pool = mock.MagicMock() with mock.patch.object(wsgi, 'LOG') as mock_log: server.start('fake-application', 34567) mock_exists.assert_called_once_with('/tmp/staging/') # Make sure a missing staging directory will log a warning. mock_log.warning.assert_called_once_with( 'Import methods are enabled but staging directory ' '%(path)s does not exist; Imports will fail!', {'path': '/tmp/staging/'}) class TestHelpers(test_utils.BaseTestCase): def test_headers_are_unicode(self): """ Verifies that the headers returned by conversion code are unicode. Headers are passed via http in non-testing mode, which automatically converts them to unicode. Verifying that the method does the conversion proves that we aren't passing data that works in tests but will fail in production. """ fixture = {'name': 'fake public image', 'is_public': True, 'size': 19, 'location': "file:///tmp/glance-tests/2", 'properties': {'distro': 'Ubuntu 10.04 LTS'}} headers = utils.image_meta_to_http_headers(fixture) for k, v in headers.items(): self.assertIsInstance(v, str) def test_data_passed_properly_through_headers(self): """ Verifies that data is the same after being passed through headers """ fixture = {'is_public': True, 'deleted': False, 'name': None, 'size': 19, 'location': "file:///tmp/glance-tests/2", 'properties': {'distro': 'Ubuntu 10.04 LTS'}} headers = utils.image_meta_to_http_headers(fixture) class FakeResponse(object): pass response = FakeResponse() response.headers = headers result = utils.get_image_meta_from_headers(response) for k, v in fixture.items(): if v is not None: self.assertEqual(v, result[k]) else: self.assertNotIn(k, result) class GetSocketTestCase(test_utils.BaseTestCase): def setUp(self): super(GetSocketTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.get_bind_addr", lambda x: ('192.168.0.13', 1234))) addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)), (2, 2, 17, '', ('192.168.0.13', 80)), (2, 3, 0, '', ('192.168.0.13', 80))] self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.socket.getaddrinfo", lambda *x: addr_info_list)) self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.time.time", mock.Mock(side_effect=[0, 1, 5, 10, 20, 35]))) wsgi.CONF.tcp_keepidle = 600 @mock.patch.object(prefetcher, 'Prefetcher') def test_correct_configure_socket(self, mock_prefetcher): mock_socket = mock.Mock() self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.eventlet.listen', lambda *x, **y: mock_socket)) server = wsgi.Server() server.default_port = 1234 server.configure_socket() self.assertIn(mock.call.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1), mock_socket.mock_calls) self.assertIn(mock.call.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), mock_socket.mock_calls) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertIn(mock.call.setsockopt( socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, wsgi.CONF.tcp_keepidle), mock_socket.mock_calls) def test_get_socket_with_bind_problems(self): self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.eventlet.listen', mock.Mock(side_effect=( [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None])))) self.assertRaises(RuntimeError, wsgi.get_socket, 1234) def 
test_get_socket_with_unexpected_socket_errno(self): self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.eventlet.listen', mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM)))) self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234) def _cleanup_uwsgi(): wsgi.uwsgi = None class Test_UwsgiChunkedFile(test_utils.BaseTestCase): def test_read_no_data(self): reader = wsgi._UWSGIChunkFile() wsgi.uwsgi = mock.MagicMock() self.addCleanup(_cleanup_uwsgi) def fake_read(): return None wsgi.uwsgi.chunked_read = fake_read out = reader.read() self.assertEqual(out, b'') def test_read_data_no_length(self): reader = wsgi._UWSGIChunkFile() wsgi.uwsgi = mock.MagicMock() self.addCleanup(_cleanup_uwsgi) values = iter([b'a', b'b', b'c', None]) def fake_read(): return next(values) wsgi.uwsgi.chunked_read = fake_read out = reader.read() self.assertEqual(out, b'abc') def test_read_zero_length(self): reader = wsgi._UWSGIChunkFile() self.assertEqual(b'', reader.read(length=0)) def test_read_data_length(self): reader = wsgi._UWSGIChunkFile() wsgi.uwsgi = mock.MagicMock() self.addCleanup(_cleanup_uwsgi) values = iter([b'a', b'b', b'c', None]) def fake_read(): return next(values) wsgi.uwsgi.chunked_read = fake_read out = reader.read(length=2) self.assertEqual(out, b'ab') def test_read_data_negative_length(self): reader = wsgi._UWSGIChunkFile() wsgi.uwsgi = mock.MagicMock() self.addCleanup(_cleanup_uwsgi) values = iter([b'a', b'b', b'c', None]) def fake_read(): return next(values) wsgi.uwsgi.chunked_read = fake_read out = reader.read(length=-2) self.assertEqual(out, b'abc') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/common/test_wsgi_app.py0000664000175000017500000002510400000000000023144 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2020, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
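# NOTE: Test_UwsgiChunkedFile above drives wsgi._UWSGIChunkFile by
# stubbing uwsgi.chunked_read().  The loop those tests describe --
# accumulate chunks until the reader returns None, honouring an
# optional positive byte limit -- might look roughly like the sketch
# below.  This is illustrative only, not the actual implementation;
# chunked_read_sketch is a hypothetical name.
def chunked_read_sketch(read_chunk, length=None):
    # A length of exactly zero short-circuits; negative lengths read
    # everything, matching the behaviour asserted in the tests above.
    if length == 0:
        return b''
    data = b''
    while True:
        chunk = read_chunk()
        if chunk is None:
            break
        data += chunk
        if length is not None and 0 < length <= len(data):
            return data[:length]
    return data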
from unittest import mock from glance.api import common from glance.api.v2 import cached_images import glance.async_ from glance.common import exception from glance.common import wsgi_app from glance import sqlite_migration from glance.tests import utils as test_utils class TestWsgiAppInit(test_utils.BaseTestCase): @mock.patch('glance.common.config.load_paste_app') @mock.patch('glance.async_.set_threadpool_model') @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_wsgi_init_sets_thread_settings(self, mock_migrate_db, mock_config_files, mock_set_model, mock_load): mock_migrate_db.return_value = False mock_config_files.return_value = [] self.config(task_pool_threads=123, group='wsgi') common.DEFAULT_POOL_SIZE = 1024 wsgi_app.init_app() # Make sure we declared the system threadpool model as native mock_set_model.assert_called_once_with('native') # Make sure we set the default pool size self.assertEqual(123, common.DEFAULT_POOL_SIZE) mock_load.assert_called_once_with('glance-api') @mock.patch('atexit.register') @mock.patch('glance.common.config.load_paste_app') @mock.patch('glance.async_.set_threadpool_model') @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_wsgi_init_registers_exit_handler(self, mock_migrate_db, mock_config_files, mock_set_model, mock_load, mock_exit): mock_migrate_db.return_value = False mock_config_files.return_value = [] wsgi_app.init_app() mock_exit.assert_called_once_with(wsgi_app.drain_workers) @mock.patch('glance.common.config.load_paste_app') @mock.patch('glance.async_.set_threadpool_model') @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_uwsgi_init_registers_exit_handler(self, mock_migrate_db, mock_config_files, mock_set_model, mock_load): mock_migrate_db.return_value = False mock_config_files.return_value = [] with mock.patch.object(wsgi_app, 'uwsgi') as mock_u: wsgi_app.init_app() self.assertEqual(mock_u.atexit, wsgi_app.drain_workers) @mock.patch('glance.api.v2.cached_images.WORKER') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) def test_drain_workers(self, mock_cache_worker): # Initialize the thread pool model and tasks_pool, like API # under WSGI would, and so we have a pointer to that exact # pool object in the cache glance.async_.set_threadpool_model('native') model = common.get_thread_pool('tasks_pool') with mock.patch.object(model.pool, 'shutdown') as mock_shutdown: wsgi_app.drain_workers() # Make sure that shutdown() was called on the tasks_pool # ThreadPoolExecutor mock_shutdown.assert_called_once_with() # Make sure we terminated the cache worker, if present. 
mock_cache_worker.terminate.assert_called_once_with() @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) def test_drain_workers_no_cache(self): glance.async_.set_threadpool_model('native') model = common.get_thread_pool('tasks_pool') with mock.patch.object(model.pool, 'shutdown'): # Make sure that with no WORKER initialized, we do not fail wsgi_app.drain_workers() self.assertIsNone(cached_images.WORKER) @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app') @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('threading.Thread') @mock.patch('glance.housekeeping.StagingStoreCleaner') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_runs_staging_cleanup(self, mock_migrate_db, mock_cleaner, mock_Thread, mock_conf, mock_load): mock_migrate_db.return_value = False mock_conf.return_value = [] wsgi_app.init_app() mock_Thread.assert_called_once_with( target=mock_cleaner().clean_orphaned_staging_residue, daemon=True) mock_Thread.return_value.start.assert_called_once_with() @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app') @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('threading.Timer') @mock.patch('glance.image_cache.prefetcher.Prefetcher') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_run_cache_prefetcher_middleware_disabled( self, mock_migrate_db, mock_prefetcher, mock_Timer, mock_conf, mock_load): mock_migrate_db.return_value = False mock_conf.return_value = [] wsgi_app.init_app() mock_Timer.assert_not_called() @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app', new=mock.MagicMock()) @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_staging_store_uri_assertion(self, mock_migrate_db, mock_conf): mock_migrate_db.return_value = False self.config(node_staging_uri='http://good.luck') mock_conf.return_value = [] # Make sure a staging URI with a bad scheme will abort startup self.assertRaises(exception.GlanceException, wsgi_app.init_app) @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app', new=mock.MagicMock()) @mock.patch('os.path.exists') @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') def test_staging_store_path_check(self, mock_migrate_db, mock_exists, mock_conf): mock_migrate_db.return_value = False mock_exists.return_value = False mock_conf.return_value = [] with mock.patch.object(wsgi_app, 'LOG') as mock_log: wsgi_app.init_app() # Make sure that a missing staging directory will log a warning. 
mock_log.warning.assert_called_once_with( 'Import methods are enabled but staging directory ' '%(path)s does not exist; Imports will fail!', {'path': '/tmp/staging/'}) @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app', new=mock.MagicMock()) @mock.patch('os.path.exists') @mock.patch('glance.sqlite_migration.get_db_path') @mock.patch('glance.sqlite_migration.Migrate.migrate') def test_sqlite_migrate(self, mock_migrate, mock_path, mock_exists, mock_conf): self.config(flavor='keystone+cache', group='paste_deploy') self.config(image_cache_driver='centralized_db') self.config(worker_self_reference_url='http://workerx') mock_path.return_value = 'fake_path' mock_exists.return_value = False mock_conf.return_value = [] wsgi_app.init_app() self.assertEqual(1, mock_migrate.call_count) @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app', new=mock.MagicMock()) @mock.patch('glance.sqlite_migration.Migrate.migrate') def test_sqlite_migrate_not_called(self, mock_migrate, mock_conf): self.config(flavor='keystone+cache', group='paste_deploy') self.config(image_cache_driver='sqlite') self.config(worker_self_reference_url='http://workerx') mock_conf.return_value = [] wsgi_app.init_app() self.assertEqual(0, mock_migrate.call_count) @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) @mock.patch('glance.common.config.load_paste_app', new=mock.MagicMock()) @mock.patch('os.path.exists') @mock.patch('os.path.join', new=mock.MagicMock()) @mock.patch('glance.sqlite_migration.can_migrate_to_central_db') @mock.patch('glance.sqlite_migration.Migrate.migrate') def test_sqlite_migrate_db_not_exist(self, mock_migrate, mock_can_migrate, mock_exists, mock_conf): self.config(flavor='keystone+cache', group='paste_deploy') self.config(image_cache_driver='centralized_db') self.config(worker_self_reference_url='http://workerx') mock_can_migrate.return_value = True mock_exists.return_value = False mock_conf.return_value = [] with mock.patch.object(sqlite_migration, 'LOG') as mock_log: wsgi_app.init_app() mock_log.debug.assert_called_once_with( 'SQLite caching database not located, skipping migration') self.assertEqual(0, mock_migrate.call_count) @mock.patch('glance.common.wsgi_app._get_config_files') @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) def test_worker_self_reference_url_not_set(self, mock_conf): self.config(flavor='keystone+cache', group='paste_deploy') self.config(image_cache_driver='centralized_db') mock_conf.return_value = [] self.assertRaises(RuntimeError, wsgi_app.init_app) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/fake_rados.py0000664000175000017500000000611600000000000021104 0ustar00zuulzuul00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. class mock_rados(object): class ioctx(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def close(self, *args, **kwargs): pass class Rados(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def connect(self, *args, **kwargs): pass def open_ioctx(self, *args, **kwargs): return mock_rados.ioctx() def shutdown(self, *args, **kwargs): pass class mock_rbd(object): class ImageExists(Exception): pass class ImageBusy(Exception): pass class ImageNotFound(Exception): pass class Image(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): pass def create_snap(self, *args, **kwargs): pass def remove_snap(self, *args, **kwargs): pass def protect_snap(self, *args, **kwargs): pass def unprotect_snap(self, *args, **kwargs): pass def read(self, *args, **kwargs): raise NotImplementedError() def write(self, *args, **kwargs): raise NotImplementedError() def resize(self, *args, **kwargs): raise NotImplementedError() def discard(self, offset, length): raise NotImplementedError() def close(self): pass def list_snaps(self): raise NotImplementedError() def parent_info(self): raise NotImplementedError() def size(self): raise NotImplementedError() class RBD(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def create(self, *args, **kwargs): pass def remove(self, *args, **kwargs): pass def list(self, *args, **kwargs): raise NotImplementedError() def clone(self, *args, **kwargs): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/fixtures.py0000664000175000017500000001710000000000000020652 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures for Glance unit tests.""" # NOTE(mriedem): This is needed for importing from fixtures. import logging as std_logging import os from unittest import mock import warnings import fixtures as pyfixtures from openstack.identity.v3 import endpoint from openstack.identity.v3 import limit as klimit from oslo_db import warning as oslo_db_warning from oslo_limit import limit from sqlalchemy import exc as sqla_exc _TRUE_VALUES = ('True', 'true', '1', 'yes') class NullHandler(std_logging.Handler): """custom default NullHandler to attempt to format the record. Used in conjunction with log_fixture.get_logging_handle_error_fixture to detect formatting errors in debug level logs without saving the logs. 
""" def handle(self, record): self.format(record) def emit(self, record): pass def createLock(self): self.lock = None class StandardLogging(pyfixtures.Fixture): """Setup Logging redirection for tests. There are a number of things we want to handle with logging in tests: * Redirect the logging to somewhere that we can test or dump it later. * Ensure that as many DEBUG messages as possible are actually executed, to ensure they are actually syntactically valid (they often have not been). * Ensure that we create useful output for tests that doesn't overwhelm the testing system (which means we can't capture the 100 MB of debug logging on every run). To do this we create a logger fixture at the root level, which defaults to INFO and create a Null Logger at DEBUG which lets us execute log messages at DEBUG but not keep the output. To support local debugging OS_DEBUG=True can be set in the environment, which will print out the full debug logging. There are also a set of overrides for particularly verbose modules to be even less than INFO. """ def setUp(self): super(StandardLogging, self).setUp() # set root logger to debug root = std_logging.getLogger() root.setLevel(std_logging.DEBUG) # supports collecting debug level for local runs if os.environ.get('OS_DEBUG') in _TRUE_VALUES: level = std_logging.DEBUG else: level = std_logging.INFO # Collect logs fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' self.logger = self.useFixture( pyfixtures.FakeLogger(format=fs, level=None)) # TODO(sdague): why can't we send level through the fake # logger? Tests prove that it breaks, but it's worth getting # to the bottom of. root.handlers[0].setLevel(level) if level > std_logging.DEBUG: # Just attempt to format debug level logs, but don't save them handler = NullHandler() self.useFixture( pyfixtures.LogHandler(handler, nuke_handlers=False)) handler.setLevel(std_logging.DEBUG) # Don't log every single DB migration step std_logging.getLogger( 'alembic.runtime.migration').setLevel(std_logging.WARNING) # At times we end up calling back into main() functions in # testing. This has the possibility of calling logging.setup # again, which completely unwinds the logging capture we've # created here. Once we've setup the logging in the way we want, # disable the ability for the test to change this. def fake_logging_setup(*args): pass self.useFixture( pyfixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup)) class WarningsFixture(pyfixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super(WarningsFixture, self).setUp() self._original_warning_filters = warnings.filters[:] # NOTE(sdague): Make deprecation warnings only happen once. Otherwise # this gets kind of crazy given the way that upstream python libs use # this. warnings.simplefilter('once', DeprecationWarning) # NOTE(sdague): this remains an unresolved item around the way # forward on is_admin, the deprecation is definitely really premature. warnings.filterwarnings( 'ignore', message='Policy enforcement is depending on the value of is_admin.' ' This key is deprecated. Please update your policy ' 'file to use the standard policy values.') # NOTE(mriedem): user/tenant is deprecated in oslo.context so don't # let anything new use it warnings.filterwarnings( 'error', message="Property '.*' has moved to '.*'") # Don't warn for our own deprecation warnings warnings.filterwarnings( 'ignore', module='glance', category=DeprecationWarning, ) # Disable deprecation warning for oslo.db's EngineFacade. 
We *really* # need to get off this but it's not happening while sqlalchemy 2.0 # stuff is ongoing warnings.filterwarnings( 'ignore', category=oslo_db_warning.OsloDBDeprecationWarning, message='EngineFacade is deprecated', ) # Enable deprecation warnings for glance itself to capture upcoming # SQLAlchemy changes warnings.filterwarnings( 'ignore', category=sqla_exc.SADeprecationWarning, ) warnings.filterwarnings( 'error', module='glance', category=sqla_exc.SADeprecationWarning, ) # Enable general SQLAlchemy warnings also to ensure we're not doing # silly stuff. It's possible that we'll need to filter things out here # with future SQLAlchemy versions, but that's a good thing warnings.filterwarnings( 'error', module='glance', category=sqla_exc.SAWarning, ) self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): warnings.filters[:] = self._original_warning_filters class KeystoneQuotaFixture(pyfixtures.Fixture): def __init__(self, **defaults): self.defaults = defaults def setUp(self): super(KeystoneQuotaFixture, self).setUp() self.mock_conn = mock.MagicMock() limit._SDK_CONNECTION = self.mock_conn mock_gem = self.useFixture( pyfixtures.MockPatch('oslo_limit.limit.Enforcer.' '_get_enforcement_model')).mock mock_gem.return_value = 'flat' fake_endpoint = endpoint.Endpoint() fake_endpoint.service_id = "service_id" fake_endpoint.region_id = "region_id" self.mock_conn.get_endpoint.return_value = fake_endpoint def fake_limits(service_id, region_id, resource_name, project_id): this_limit = klimit.Limit() this_limit.resource_name = resource_name this_limit.resource_limit = self.defaults[resource_name] return iter([this_limit]) self.mock_conn.limits.side_effect = fake_limits ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.910309 glance-29.0.0/glance/tests/unit/image_cache/0000775000175000017500000000000000000000000020635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/image_cache/__init__.py0000664000175000017500000000000000000000000022734 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.910309 glance-29.0.0/glance/tests/unit/image_cache/drivers/0000775000175000017500000000000000000000000022313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/image_cache/drivers/__init__.py0000664000175000017500000000000000000000000024412 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/image_cache/drivers/test_sqlite.py0000664000175000017500000000232200000000000025224 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the sqlite image_cache driver. """ import os from unittest import mock import ddt from glance.image_cache.drivers import sqlite from glance.tests import utils @ddt.ddt class TestSqlite(utils.BaseTestCase): @ddt.data(True, False) def test_delete_cached_file(self, throw_not_exists): with mock.patch.object(os, 'unlink') as mock_unlink: if throw_not_exists: mock_unlink.side_effect = OSError((2, 'File not found')) # Should not raise an exception in all cases sqlite.delete_cached_file('/tmp/dummy_file') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.910309 glance-29.0.0/glance/tests/unit/keymgr/0000775000175000017500000000000000000000000017726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/keymgr/__init__.py0000664000175000017500000000000000000000000022025 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/keymgr/fake.py0000664000175000017500000000160500000000000021210 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # Copyright 2019 Red Hat # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake key manager.""" from castellan.tests.unit.key_manager import mock_key_manager def fake_api(configuration=None): return mock_key_manager.MockKeyManager(configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_auth.py0000664000175000017500000005061700000000000021013 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
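# NOTE: TestEndpoints below checks auth.get_endpoint() against a small
# fake service catalog.  A bare-bones sketch of that style of lookup,
# matching the catalog shape used in these tests, is shown here for
# orientation (illustrative only; get_endpoint_sketch is a hypothetical
# name, not the real glance.common.auth code):
def get_endpoint_sketch(service_catalog, service_type,
                        endpoint_region=None, endpoint_type='publicURL'):
    for service in service_catalog:
        if service.get('type') != service_type:
            continue
        for ep in service.get('endpoints', []):
            if endpoint_region and ep.get('region') != endpoint_region:
                continue
            if endpoint_type in ep:
                # First endpoint matching both the region filter and
                # the requested URL type wins.
                return ep[endpoint_type]
    raise LookupError('no %s endpoint of type %s found'
                      % (service_type, endpoint_type))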
import http.client as http from oslo_serialization import jsonutils import webob from glance.common import auth from glance.common import exception from glance.tests import utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' class FakeResponse(object): """ Simple class that masks the inconsistency between webob.Response.status_int and httplib.Response.status """ def __init__(self, resp): self.resp = resp def __getitem__(self, key): return self.resp.headers.get(key) @property def status(self): return self.resp.status_int class V2Token(object): def __init__(self): self.tok = self.base_token def add_service_no_type(self): catalog = self.tok['access']['serviceCatalog'] service_type = {"name": "glance_no_type"} catalog.append(service_type) service = catalog[-1] service['endpoints'] = [self.base_endpoint] def add_service(self, s_type, region_list=None): if region_list is None: region_list = [] catalog = self.tok['access']['serviceCatalog'] service_type = {"type": s_type, "name": "glance"} catalog.append(service_type) service = catalog[-1] endpoint_list = [] if not region_list: endpoint_list.append(self.base_endpoint) else: for region in region_list: endpoint = self.base_endpoint endpoint['region'] = region endpoint_list.append(endpoint) service['endpoints'] = endpoint_list @property def token(self): return self.tok @property def base_endpoint(self): return { "adminURL": "http://localhost:9292", "internalURL": "http://localhost:9292", "publicURL": "http://localhost:9292" } @property def base_token(self): return { "access": { "token": { "expires": "2010-11-23T16:40:53.321584", "id": "5c7f8799-2e54-43e4-851b-31f81871b6c", "tenant": {"id": "1", "name": "tenant-ok"} }, "serviceCatalog": [ ], "user": { "id": "2", "roles": [{ "tenantId": "1", "id": "1", "name": "Admin" }], "name": "joeadmin" } } } class TestKeystoneAuthPlugin(utils.BaseTestCase): """Test that the Keystone auth plugin works properly""" def setUp(self): super(TestKeystoneAuthPlugin, self).setUp() def test_get_plugin_from_strategy_keystone(self): strategy = auth.get_plugin_from_strategy('keystone') self.assertIsInstance(strategy, auth.KeystoneStrategy) self.assertTrue(strategy.configure_via_auth) def test_get_plugin_from_strategy_keystone_configure_via_auth_false(self): strategy = auth.get_plugin_from_strategy('keystone', configure_via_auth=False) self.assertIsInstance(strategy, auth.KeystoneStrategy) self.assertFalse(strategy.configure_via_auth) def test_required_creds(self): """ Test that plugin created without required credential pieces raises an exception """ bad_creds = [ {}, # missing everything { 'username': 'user1', 'strategy': 'keystone', 'password': 'pass' }, # missing auth_url { 'password': 'pass', 'strategy': 'keystone', 'auth_url': 'http://localhost/v1' }, # missing username { 'username': 'user1', 'strategy': 'keystone', 'auth_url': 'http://localhost/v1' }, # missing password { 'username': 'user1', 'password': 'pass', 'auth_url': 'http://localhost/v1' }, # missing strategy { 'username': 'user1', 'password': 'pass', 'strategy': 'keystone', 'auth_url': 'http://localhost/v2.0/' }, # v2.0: missing tenant { 'username': None, 'password': 'pass', 'auth_url': 'http://localhost/v2.0/' }, # None parameter { 'username': 'user1', 'password': 'pass', 'auth_url': 'http://localhost/v2.0/', 'tenant': None } # None tenant ] for creds in bad_creds: try: plugin = auth.KeystoneStrategy(creds) 
plugin.authenticate() self.fail("Failed to raise correct exception when supplying " "bad credentials: %r" % creds) except exception.MissingCredentialError: continue # Expected def test_invalid_auth_url_v1(self): """ Test that a 400 during authenticate raises exception.AuthBadRequest """ def fake_do_request(*args, **kwargs): resp = webob.Response() resp.status = http.BAD_REQUEST return FakeResponse(resp), "" self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request) bad_creds = { 'username': 'user1', 'auth_url': 'http://localhost/badauthurl/', 'password': 'pass', 'strategy': 'keystone', 'region': 'RegionOne' } plugin = auth.KeystoneStrategy(bad_creds) self.assertRaises(exception.AuthBadRequest, plugin.authenticate) def test_invalid_auth_url_v2(self): """ Test that a 400 during authenticate raises exception.AuthBadRequest """ def fake_do_request(*args, **kwargs): resp = webob.Response() resp.status = http.BAD_REQUEST return FakeResponse(resp), "" self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request) bad_creds = { 'username': 'user1', 'auth_url': 'http://localhost/badauthurl/v2.0/', 'password': 'pass', 'tenant': 'tenant1', 'strategy': 'keystone', 'region': 'RegionOne' } plugin = auth.KeystoneStrategy(bad_creds) self.assertRaises(exception.AuthBadRequest, plugin.authenticate) def test_v1_auth(self): """Test v1 auth code paths""" def fake_do_request(cls, url, method, headers=None, body=None): if url.find("2.0") != -1: self.fail("Invalid v1.0 token path (%s)" % url) headers = headers or {} resp = webob.Response() if (headers.get('X-Auth-User') != 'user1' or headers.get('X-Auth-Key') != 'pass'): resp.status = http.UNAUTHORIZED else: resp.status = http.OK resp.headers.update({"x-image-management-url": "example.com"}) return FakeResponse(resp), "" self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request) unauthorized_creds = [ { 'username': 'wronguser', 'auth_url': 'http://localhost/badauthurl/', 'strategy': 'keystone', 'region': 'RegionOne', 'password': 'pass' }, # wrong username { 'username': 'user1', 'auth_url': 'http://localhost/badauthurl/', 'strategy': 'keystone', 'region': 'RegionOne', 'password': 'badpass' }, # bad password... 
] for creds in unauthorized_creds: try: plugin = auth.KeystoneStrategy(creds) plugin.authenticate() self.fail("Failed to raise NotAuthenticated when supplying " "bad credentials: %r" % creds) except exception.NotAuthenticated: continue # Expected no_strategy_creds = { 'username': 'user1', 'auth_url': 'http://localhost/redirect/', 'password': 'pass', 'region': 'RegionOne' } try: plugin = auth.KeystoneStrategy(no_strategy_creds) plugin.authenticate() self.fail("Failed to raise MissingCredentialError when " "supplying no strategy: %r" % no_strategy_creds) except exception.MissingCredentialError: pass # Expected good_creds = [ { 'username': 'user1', 'auth_url': 'http://localhost/redirect/', 'password': 'pass', 'strategy': 'keystone', 'region': 'RegionOne' } ] for creds in good_creds: plugin = auth.KeystoneStrategy(creds) self.assertIsNone(plugin.authenticate()) self.assertEqual("example.com", plugin.management_url) # Assert it does not update management_url via auth response for creds in good_creds: plugin = auth.KeystoneStrategy(creds, configure_via_auth=False) self.assertIsNone(plugin.authenticate()) self.assertIsNone(plugin.management_url) def test_v2_auth(self): """Test v2 auth code paths""" mock_token = None def fake_do_request(cls, url, method, headers=None, body=None): if (not url.rstrip('/').endswith('v2.0/tokens') or url.count("2.0") != 1): self.fail("Invalid v2.0 token path (%s)" % url) creds = jsonutils.loads(body)['auth'] username = creds['passwordCredentials']['username'] password = creds['passwordCredentials']['password'] tenant = creds['tenantName'] resp = webob.Response() if (username != 'user1' or password != 'pass' or tenant != 'tenant-ok'): resp.status = http.UNAUTHORIZED else: resp.status = http.OK body = mock_token.token return FakeResponse(resp), jsonutils.dumps(body) mock_token = V2Token() mock_token.add_service('image', ['RegionOne']) self.mock_object(auth.KeystoneStrategy, '_do_request', fake_do_request) unauthorized_creds = [ { 'username': 'wronguser', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # wrong username { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'badpass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # bad password... { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'carterhayes', 'strategy': 'keystone', 'region': 'RegionOne' }, # bad tenant... 
] for creds in unauthorized_creds: try: plugin = auth.KeystoneStrategy(creds) plugin.authenticate() self.fail("Failed to raise NotAuthenticated when supplying " "bad credentials: %r" % creds) except exception.NotAuthenticated: continue # Expected no_region_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'strategy': 'keystone' } plugin = auth.KeystoneStrategy(no_region_creds) self.assertIsNone(plugin.authenticate()) self.assertEqual('http://localhost:9292', plugin.management_url) # Add another image service, with a different region mock_token.add_service('image', ['RegionTwo']) try: plugin = auth.KeystoneStrategy(no_region_creds) plugin.authenticate() self.fail("Failed to raise RegionAmbiguity when no region present " "and multiple regions exist: %r" % no_region_creds) except exception.RegionAmbiguity: pass # Expected wrong_region_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'strategy': 'keystone', 'region': 'NonExistentRegion' } try: plugin = auth.KeystoneStrategy(wrong_region_creds) plugin.authenticate() self.fail("Failed to raise NoServiceEndpoint when supplying " "wrong region: %r" % wrong_region_creds) except exception.NoServiceEndpoint: pass # Expected no_strategy_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'region': 'RegionOne' } try: plugin = auth.KeystoneStrategy(no_strategy_creds) plugin.authenticate() self.fail("Failed to raise MissingCredentialError when " "supplying no strategy: %r" % no_strategy_creds) except exception.MissingCredentialError: pass # Expected bad_strategy_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'region': 'RegionOne', 'strategy': 'keypebble' } try: plugin = auth.KeystoneStrategy(bad_strategy_creds) plugin.authenticate() self.fail("Failed to raise BadAuthStrategy when supplying " "bad auth strategy: %r" % bad_strategy_creds) except exception.BadAuthStrategy: pass # Expected mock_token = V2Token() mock_token.add_service('image', ['RegionOne', 'RegionTwo']) good_creds = [ { 'username': 'user1', 'auth_url': 'http://localhost/v2.0/', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # auth_url with trailing '/' { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # auth_url without trailing '/' { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionTwo' } # Second region ] for creds in good_creds: plugin = auth.KeystoneStrategy(creds) self.assertIsNone(plugin.authenticate()) self.assertEqual('http://localhost:9292', plugin.management_url) ambiguous_region_creds = { 'username': 'user1', 'auth_url': 'http://localhost/v2.0/', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' } mock_token = V2Token() # Add two identical services mock_token.add_service('image', ['RegionOne']) mock_token.add_service('image', ['RegionOne']) try: plugin = auth.KeystoneStrategy(ambiguous_region_creds) plugin.authenticate() self.fail("Failed to raise RegionAmbiguity when " "non-unique regions exist: %r" % ambiguous_region_creds) except exception.RegionAmbiguity: pass mock_token = V2Token() mock_token.add_service('bad-image', 
['RegionOne']) good_creds = { 'username': 'user1', 'auth_url': 'http://localhost/v2.0/', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' } try: plugin = auth.KeystoneStrategy(good_creds) plugin.authenticate() self.fail("Failed to raise NoServiceEndpoint when bad service " "type encountered") except exception.NoServiceEndpoint: pass mock_token = V2Token() mock_token.add_service_no_type() try: plugin = auth.KeystoneStrategy(good_creds) plugin.authenticate() self.fail("Failed to raise NoServiceEndpoint when bad service " "type encountered") except exception.NoServiceEndpoint: pass try: plugin = auth.KeystoneStrategy(good_creds, configure_via_auth=False) plugin.authenticate() except exception.NoServiceEndpoint: self.fail("NoServiceEndpoint was raised when authenticate " "should not check for endpoint.") class TestEndpoints(utils.BaseTestCase): def setUp(self): super(TestEndpoints, self).setUp() self.service_catalog = [ { 'endpoint_links': [], 'endpoints': [ { 'adminURL': 'http://localhost:8080/', 'region': 'RegionOne', 'internalURL': 'http://internalURL/', 'publicURL': 'http://publicURL/', }, ], 'type': 'object-store', 'name': 'Object Storage Service', } ] def test_get_endpoint_with_custom_server_type(self): endpoint = auth.get_endpoint(self.service_catalog, service_type='object-store') self.assertEqual('http://publicURL/', endpoint) def test_get_endpoint_with_custom_endpoint_type(self): endpoint = auth.get_endpoint(self.service_catalog, service_type='object-store', endpoint_type='internalURL') self.assertEqual('http://internalURL/', endpoint) def test_get_endpoint_raises_with_invalid_service_type(self): self.assertRaises(exception.NoServiceEndpoint, auth.get_endpoint, self.service_catalog, service_type='foo') def test_get_endpoint_raises_with_invalid_endpoint_type(self): self.assertRaises(exception.NoServiceEndpoint, auth.get_endpoint, self.service_catalog, service_type='object-store', endpoint_type='foo') def test_get_endpoint_raises_with_invalid_endpoint_region(self): self.assertRaises(exception.NoServiceEndpoint, auth.get_endpoint, self.service_catalog, service_type='object-store', endpoint_region='foo', endpoint_type='internalURL') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_cache_manage.py0000664000175000017500000000764700000000000022432 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
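# NOTE: the TestEndpoints cases above all reduce to a walk over the Keystone
# v2 service catalog: find a service whose 'type' matches, then an endpoint
# in the requested region, and return the requested URL flavour. The helper
# below is an illustrative sketch of that lookup only; it is not glance's
# auth.get_endpoint implementation, and its name and LookupError behaviour
# are invented for the example.
def _sketch_get_endpoint(service_catalog, service_type,
                         endpoint_region=None, endpoint_type='publicURL'):
    """Return the first matching endpoint URL or raise LookupError."""
    for service in service_catalog:
        if service.get('type') != service_type:
            continue
        for endpoint in service.get('endpoints', []):
            # Skip endpoints in other regions when a region was requested.
            if endpoint_region and endpoint.get('region') != endpoint_region:
                continue
            if endpoint_type not in endpoint:
                # Matching service and region, but no URL of this kind.
                raise LookupError(endpoint_type)
            return endpoint[endpoint_type]
    raise LookupError(service_type)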
import sys from unittest import mock import fixtures from glance.cmd import cache_manage from glance.image_cache import client as cache_client from glance.tests import utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' class TestCacheManage(test_utils.BaseTestCase): def setUp(self): super(TestCacheManage, self).setUp() def _main_test_helper(self, argv, result=cache_manage.SUCCESS): self.useFixture(fixtures.MonkeyPatch('sys.argv', argv)) with mock.patch.object(cache_client, 'get_client'): with mock.patch.object(sys, 'exit') as mock_exit: cache_manage.main() mock_exit.assert_called_once_with(result) def test_list_cached_images(self): self._main_test_helper(['glance.cmd.cache_manage', 'list-cached']) def test_list_queued_images(self): self._main_test_helper(['glance.cmd.cache_manage', 'list-queued']) @mock.patch.object(cache_manage, 'user_confirm') def test_queue_image(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'queue-image', UUID1]) self.assertEqual(1, mock_user_confirm.call_count) @mock.patch.object(cache_manage, 'user_confirm') def test_queue_image_invalid_image_id(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'queue-image', 'fake_id'], result=cache_manage.FAILURE) @mock.patch.object(cache_manage, 'user_confirm') def test_delete_queued_image(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'delete-queued-image', UUID1]) self.assertEqual(1, mock_user_confirm.call_count) @mock.patch.object(cache_manage, 'user_confirm') def test_delete_queued_image_invalid_image_id(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'delete-queued-image', 'fake_id'], result=cache_manage.FAILURE) @mock.patch.object(cache_manage, 'user_confirm') def test_delete_cached_image(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'delete-cached-image', UUID1]) self.assertEqual(1, mock_user_confirm.call_count) @mock.patch.object(cache_manage, 'user_confirm') def test_delete_cached_image_invalid_image_id(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'delete-cached-image', 'fake_id'], result=cache_manage.FAILURE) @mock.patch.object(cache_manage, 'user_confirm') def test_delete_all_queued_image(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'delete-all-queued-images']) self.assertEqual(1, mock_user_confirm.call_count) @mock.patch.object(cache_manage, 'user_confirm') def test_delete_all_cached_image(self, mock_user_confirm): self._main_test_helper(['glance.cmd.cache_manage', 'delete-all-cached-images']) self.assertEqual(1, mock_user_confirm.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_cache_middleware.py0000664000175000017500000004337400000000000023314 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
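# NOTE: the _main_test_helper pattern used throughout TestCacheManage above
# generalizes to any CLI entry point that reports its result via sys.exit():
# fake sys.argv, stub out external clients, then assert on the exit status.
# A minimal self-contained sketch of the idea follows; run_cli and its
# parameters are illustrative names, not part of glance.
import sys
from unittest import mock


def run_cli(main_func, argv, expected_status=0):
    """Run main_func under a fake argv and assert its exit status."""
    with mock.patch.object(sys, 'argv', argv):
        with mock.patch.object(sys, 'exit') as mock_exit:
            main_func()
    mock_exit.assert_called_once_with(expected_status)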
import http.client as http from unittest.mock import patch from oslo_log.fixture import logging_error as log_fixture from oslo_policy import policy from oslo_utils.fixture import uuidsentinel as uuids import testtools import webob import glance.api.middleware.cache import glance.api.policy from glance.common import exception from glance import context from glance.tests.unit import base from glance.tests.unit import fixtures as glance_fixtures from glance.tests.unit import test_policy from glance.tests.unit import utils as unit_test_utils class ImageStub(object): def __init__(self, image_id, owner, extra_properties=None, visibility='private'): if extra_properties is None: extra_properties = {} self.image_id = image_id self.visibility = visibility self.status = 'active' self.extra_properties = extra_properties self.checksum = 'c1234' self.size = 123456789 self.os_hash_algo = None self.container_format = 'bare' self.disk_format = 'raw' self.updated_at = self.created_at = None self.name = 'foo' self.min_disk = self.min_ram = 0 self.protected = False self.os_hidden = False self.checksum = 0 self.os_hash_algo = 'md5' self.os_hash_value = None self.owner = owner self.virtual_size = 0 self.tags = [] self.member = self.owner class TestCacheMiddlewareURLMatching(testtools.TestCase): def setUp(self): super().setUp() # Limit the amount of DeprecationWarning messages in the unit test logs self.useFixture(glance_fixtures.WarningsFixture()) # Make sure logging output is limited but still test debug formatting self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(glance_fixtures.StandardLogging()) def test_v2_match_id(self): req = webob.Request.blank('/v2/images/asdf/file') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertEqual(('v2', 'GET', 'asdf'), out) def test_v2_no_match_bad_path(self): req = webob.Request.blank('/v2/images/asdf') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertIsNone(out) def test_no_match_unknown_version(self): req = webob.Request.blank('/v3/images/asdf') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertIsNone(out) class TestCacheMiddlewareRequestStashCacheInfo(testtools.TestCase): def setUp(self): super(TestCacheMiddlewareRequestStashCacheInfo, self).setUp() self.request = webob.Request.blank('') self.middleware = glance.api.middleware.cache.CacheFilter # Limit the amount of DeprecationWarning messages in the unit test logs self.useFixture(glance_fixtures.WarningsFixture()) # Make sure logging output is limited but still test debug formatting self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(glance_fixtures.StandardLogging()) def test_stash_cache_request_info(self): self.middleware._stash_request_info(self.request, 'asdf', 'GET', 'v2') self.assertEqual('asdf', self.request.environ['api.cache.image_id']) self.assertEqual('GET', self.request.environ['api.cache.method']) self.assertEqual('v2', self.request.environ['api.cache.version']) def test_fetch_cache_request_info(self): self.request.environ['api.cache.image_id'] = 'asdf' self.request.environ['api.cache.method'] = 'GET' self.request.environ['api.cache.version'] = 'v2' (image_id, method, version) = self.middleware._fetch_request_info( self.request) self.assertEqual('asdf', image_id) self.assertEqual('GET', method) self.assertEqual('v2', version) def test_fetch_cache_request_info_unset(self): out = self.middleware._fetch_request_info(self.request) self.assertIsNone(out) class 
ChecksumTestCacheFilter(glance.api.middleware.cache.CacheFilter): def __init__(self): class DummyCache(object): def get_caching_iter(self, image_id, image_checksum, app_iter): self.image_checksum = image_checksum self.cache = DummyCache() self.policy = unit_test_utils.FakePolicyEnforcer() class TestCacheMiddlewareChecksumVerification(base.IsolatedUnitTest): def setUp(self): super(TestCacheMiddlewareChecksumVerification, self).setUp() self.context = context.RequestContext(is_admin=True) self.request = webob.Request.blank('') self.request.context = self.context def test_checksum_v2_header(self): cache_filter = ChecksumTestCacheFilter() headers = { "x-image-meta-checksum": "1234567890", "Content-MD5": "abcdefghi" } resp = webob.Response(request=self.request, headers=headers) cache_filter._process_GET_response(resp, None) self.assertEqual("abcdefghi", cache_filter.cache.image_checksum) def test_checksum_missing_header(self): cache_filter = ChecksumTestCacheFilter() resp = webob.Response(request=self.request) cache_filter._process_GET_response(resp, None) self.assertIsNone(cache_filter.cache.image_checksum) class FakeImageSerializer(object): def show(self, response, raw_response): return True class ProcessRequestTestCacheFilter(glance.api.middleware.cache.CacheFilter): def __init__(self): self.serializer = FakeImageSerializer() class DummyCache(object): def __init__(self): self.deleted_images = [] def is_cached(self, image_id): return True def get_caching_iter(self, image_id, image_checksum, app_iter): pass def delete_cached_image(self, image_id): self.deleted_images.append(image_id) def get_image_size(self, image_id): pass self.cache = DummyCache() self.policy = unit_test_utils.FakePolicyEnforcer() class TestCacheMiddlewareProcessRequest(base.IsolatedUnitTest): def _enforcer_from_rules(self, unparsed_rules): rules = policy.Rules.from_dict(unparsed_rules) enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) enforcer.set_rules(rules, overwrite=True) return enforcer def test_verify_metadata_deleted_image(self): """ Test verify_metadata raises exception.NotFound for a deleted image """ image_meta = {'status': 'deleted', 'is_public': True, 'deleted': True} cache_filter = ProcessRequestTestCacheFilter() self.assertRaises(exception.NotFound, cache_filter._verify_metadata, image_meta) def _test_verify_metadata_zero_size(self, image_meta): """ Test verify_metadata updates metadata with cached image size for images with 0 size. :param image_meta: Image metadata, which may be either an ImageTarget instance or a legacy v1 dict. 
""" image_size = 1 cache_filter = ProcessRequestTestCacheFilter() with patch.object(cache_filter.cache, 'get_image_size', return_value=image_size): cache_filter._verify_metadata(image_meta) self.assertEqual(image_size, image_meta['size']) def test_verify_metadata_zero_size(self): """ Test verify_metadata updates metadata with cached image size for images with 0 size """ image_meta = {'size': 0, 'deleted': False, 'id': 'test1', 'status': 'active'} self._test_verify_metadata_zero_size(image_meta) def test_verify_metadata_is_image_target_instance_with_zero_size(self): """ Test verify_metadata updates metadata which is ImageTarget instance """ image = ImageStub('test1', uuids.owner) image.size = 0 image_meta = glance.api.policy.ImageTarget(image) self._test_verify_metadata_zero_size(image_meta) def test_v2_process_request_response_headers(self): def dummy_img_iterator(): for i in range(3): yield i image_id = 'test1' request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext() image_meta = { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'properties': {}, } image = ImageStub(image_id, request.context.project_id) request.environ['api.cache.image'] = image for k, v in image_meta.items(): setattr(image, k, v) cache_filter = ProcessRequestTestCacheFilter() response = cache_filter._process_v2_request( request, image_id, dummy_img_iterator, image_meta) self.assertEqual('application/octet-stream', response.headers['Content-Type']) self.assertEqual('c1234', response.headers['Content-MD5']) self.assertEqual('123456789', response.headers['Content-Length']) def test_v2_process_request_without_checksum(self): def dummy_img_iterator(): for i in range(3): yield i image_id = 'test1' request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext() image = ImageStub(image_id, request.context.project_id) image.checksum = None request.environ['api.cache.image'] = image image_meta = { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'size': '123456789', } cache_filter = ProcessRequestTestCacheFilter() response = cache_filter._process_v2_request( request, image_id, dummy_img_iterator, image_meta) self.assertNotIn('Content-MD5', response.headers.keys()) def test_process_request_without_download_image_policy(self): """ Test for cache middleware skip processing when request context has not 'download_image' role. """ def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, request.context.project_id) return image, {'status': 'active', 'properties': {}} image_id = 'test1' request = webob.Request.blank('/v2/images/%s/file' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata enforcer = self._enforcer_from_rules({ 'get_image': '', 'download_image': '!' }) cache_filter.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_request, request) def test_v2_process_request_download_restricted(self): """ Test process_request for v2 api where _member_ role not able to download the image with custom property. 
""" image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, request.context.project_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return image, glance.api.policy.ImageTarget(image) enforcer = self._enforcer_from_rules({ "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted", "get_image": "" }) request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['_member_']) cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata cache_filter.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_request, request) def test_v2_process_request_download_permitted(self): """ Test process_request for v2 api where member role able to download the image with custom property. """ image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, request.context.project_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return image, glance.api.policy.ImageTarget(image) request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['member']) cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) actual = cache_filter.process_request(request) self.assertTrue(actual) class TestCacheMiddlewareProcessResponse(base.IsolatedUnitTest): def test_get_status_code(self): headers = {"x-image-meta-deleted": True} resp = webob.Response(headers=headers) cache_filter = ProcessRequestTestCacheFilter() actual = cache_filter.get_status_code(resp) self.assertEqual(http.OK, actual) def test_v2_process_response_download_restricted(self): """ Test process_response for v2 api where _member_ role not able to download the image with custom property. """ image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v2') def fake_get_v2_image_metadata(*args, **kwargs): image = test_policy.ImageStub( image_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return image, glance.api.policy.ImageTarget(image) cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted", "get_image": "" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['_member_']) resp = webob.Response(request=request) self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_response, resp) def test_v2_process_response_download_permitted(self): """ Test process_response for v2 api where member role able to download the image with custom property. 
""" image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v2') def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, request.context.project_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return image, glance.api.policy.ImageTarget(image) cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['member']) resp = webob.Response(request=request) actual = cache_filter.process_response(resp) self.assertEqual(resp, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_cached_images.py0000664000175000017500000004125500000000000022604 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Yahoo! Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import webob from glance.api.v2 import cached_images import glance.gateway from glance import image_cache from glance import notifier import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' class FakeImage(object): def __init__(self, id=None, status='active', container_format='ami', disk_format='ami', locations=None): self.id = id or UUID4 self.status = status self.container_format = container_format self.disk_format = disk_format self.locations = locations self.owner = unit_test_utils.TENANT1 self.created_at = '' self.updated_at = '' self.min_disk = '' self.min_ram = '' self.protected = False self.checksum = '' self.os_hash_algo = '' self.os_hash_value = '' self.size = 0 self.virtual_size = 0 self.visibility = 'public' self.os_hidden = False self.name = 'foo' self.tags = [] self.extra_properties = {} self.member = self.owner # NOTE(danms): This fixture looks more like the db object than # the proxy model. This needs fixing all through the tests # below. 
self.image_id = self.id class FakeCache(image_cache.ImageCache): def __init__(self): self.init_driver() self.deleted_images = [] def init_driver(self): pass def get_cached_images(self): return [{'image_id': 'test'}] def delete_cached_image(self, image_id): self.deleted_images.append(image_id) def delete_all_cached_images(self): self.delete_cached_image( self.get_cached_images()[0].get('image_id')) return 1 def get_queued_images(self): return {'test': 'passed'} def queue_image(self, image_id): return 'pass' def delete_queued_image(self, image_id): self.deleted_images.append(image_id) def delete_all_queued_images(self): self.delete_queued_image('deleted_img') return 1 class FakeController(cached_images.CacheController): def __init__(self): self.cache = FakeCache() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() self.gateway = glance.gateway.Gateway(self.db, self.store, self.notifier, self.policy) class TestController(test_utils.BaseTestCase): def test_initialization_without_conf(self): # NOTE(abhishekk): Since we are initializing cache driver only # if image_cache_dir is set, here we are checking that cache # object is None when it is not set caching_controller = cached_images.CacheController() self.assertIsNone(caching_controller.cache) class TestCachedImages(test_utils.BaseTestCase): def setUp(self): super(TestCachedImages, self).setUp() test_controller = FakeController() self.controller = test_controller def test_get_cached_images(self): self.config(image_cache_dir='fake_cache_directory') req = webob.Request.blank('') req.context = 'test' result = self.controller.get_cached_images(req) self.assertEqual({'cached_images': [{'image_id': 'test'}]}, result) def test_delete_cached_image(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.controller.delete_cached_image(req, image_id=UUID4) self.assertEqual([UUID4], self.controller.cache.deleted_images) def test_delete_cached_images(self): self.config(image_cache_dir='fake_cache_directory') req = webob.Request.blank('') req.context = 'test' self.assertEqual({'num_deleted': 1}, self.controller.delete_cached_images(req)) self.assertEqual(['test'], self.controller.cache.deleted_images) def test_get_queued_images(self): self.config(image_cache_dir='fake_cache_directory') req = webob.Request.blank('') req.context = 'test' result = self.controller.get_queued_images(req) self.assertEqual({'queued_images': {'test': 'passed'}}, result) def test_queue_image(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.controller.queue_image(req, image_id=UUID4) def test_delete_queued_image(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.controller.delete_queued_image(req, UUID4) self.assertEqual([UUID4], self.controller.cache.deleted_images) def test_delete_queued_images(self): self.config(image_cache_dir='fake_cache_directory') req = webob.Request.blank('') req.context = 'test' self.assertEqual({'num_deleted': 1}, self.controller.delete_queued_images(req)) 
self.assertEqual(['deleted_img'], self.controller.cache.deleted_images) class TestCachedImagesNegative(test_utils.BaseTestCase): def setUp(self): super(TestCachedImagesNegative, self).setUp() test_controller = FakeController() self.controller = test_controller def test_get_cached_images_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.get_cached_images, req) def test_get_cached_images_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get_cached_images, req) def test_delete_cached_image_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_cached_image, req, image_id='test') def test_delete_cached_image_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_cached_image, req, image_id=UUID4) def test_delete_cached_images_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_cached_images, req) def test_delete_cached_images_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_cached_images, req) def test_get_queued_images_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.get_queued_images, req) def test_get_queued_images_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get_queued_images, req) def test_queue_image_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.queue_image, req, image_id='test1') def test_queue_image_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.queue_image, req, image_id=UUID4) def test_delete_queued_image_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_queued_image, req, image_id='test1') def test_delete_queued_image_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with 
mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_queued_image, req, image_id=UUID4) def test_delete_queued_images_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_queued_images, req) def test_delete_queued_images_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"manage_image_cache": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_queued_images, req) def test_delete_cache_entry_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"cache_delete": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_cache_entry, req, image_id=UUID4) def test_delete_cache_entry_disabled(self): req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_cache_entry, req, image_id=UUID4) def test_delete_non_existing_cache_entries(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_cache_entry, req, image_id='non-existing-queued-image') def test_clear_cache_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"cache_delete": False} req = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.clear_cache, req) def test_clear_cache_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.clear_cache, req) def test_cache_clear_invalid_target(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() req.headers.update({'x-image-cache-clear-target': 'invalid'}) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.clear_cache, req) def test_get_cache_state_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.get_cache_state, req) def test_get_cache_state_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"cache_list": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get_cache_state, req) def test_queue_image_from_api_disabled(self): req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPNotFound, self.controller.queue_image_from_api, req, image_id='test1') def test_queue_image_from_api_forbidden(self): self.config(image_cache_dir='fake_cache_directory') self.controller.policy.rules = {"cache_image": False} req = unit_test_utils.get_fake_request() with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPForbidden, self.controller.queue_image_from_api, req, 
image_id=UUID4) def test_non_active_image_for_queue_api(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() for status in ('saving', 'queued', 'pending_delete', 'deactivated', 'importing', 'uploading'): with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status=status) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.queue_image_from_api, req, image_id=UUID4) def test_queue_api_non_existing_image_(self): self.config(image_cache_dir='fake_cache_directory') req = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.queue_image_from_api, req, image_id='non-existing-image-id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_context.py0000664000175000017500000002073300000000000021532 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from glance import context from glance.tests.unit import utils as unit_utils from glance.tests import utils CONF = cfg.CONF def _fake_image(owner, is_public): return { 'id': None, 'owner': owner, 'visibility': 'public' if is_public else 'shared', } def _fake_membership(can_share=False): return {'can_share': can_share} class TestContext(utils.BaseTestCase): def setUp(self): super(TestContext, self).setUp() self.db_api = unit_utils.FakeDB() def do_visible(self, exp_res, img_owner, img_public, **kwargs): """ Perform a context visibility test. Creates a (fake) image with the specified owner and is_public attributes, then creates a context with the given keyword arguments and expects exp_res as the result of an is_image_visible() call on the context. """ img = _fake_image(img_owner, img_public) ctx = context.RequestContext(**kwargs) self.assertEqual(exp_res, self.db_api.is_image_visible(ctx, img)) def test_empty_public(self): """ Tests that an empty context (with is_admin set to True) can access an image with is_public set to True. """ self.do_visible(True, None, True, is_admin=True) def test_empty_public_owned(self): """ Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to True. """ self.do_visible(True, 'pattieblack', True, is_admin=True) def test_empty_private(self): """ Tests that an empty context (with is_admin set to True) can access an image with is_public set to False. """ self.do_visible(True, None, False, is_admin=True) def test_empty_private_owned(self): """ Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to False. """ self.do_visible(True, 'pattieblack', False, is_admin=True) def test_anon_public(self): """ Tests that an anonymous context (with is_admin set to False) can access an image with is_public set to True. 
""" self.do_visible(True, None, True) def test_anon_public_owned(self): """ Tests that an anonymous context (with is_admin set to False) can access an owned image with is_public set to True. """ self.do_visible(True, 'pattieblack', True) def test_anon_private(self): """ Tests that an anonymous context (with is_admin set to False) can access an unowned image with is_public set to False. """ self.do_visible(True, None, False) def test_anon_private_owned(self): """ Tests that an anonymous context (with is_admin set to False) cannot access an owned image with is_public set to False. """ self.do_visible(False, 'pattieblack', False) def test_auth_public(self): """ Tests that an authenticated context (with is_admin set to False) can access an image with is_public set to True. """ self.do_visible(True, None, True, project_id='froggy') def test_auth_public_unowned(self): """ Tests that an authenticated context (with is_admin set to False) can access an image (which it does not own) with is_public set to True. """ self.do_visible(True, 'pattieblack', True, project_id='froggy') def test_auth_public_owned(self): """ Tests that an authenticated context (with is_admin set to False) can access an image (which it does own) with is_public set to True. """ self.do_visible(True, 'pattieblack', True, project_id='pattieblack') def test_auth_private(self): """ Tests that an authenticated context (with is_admin set to False) can access an image with is_public set to False. """ self.do_visible(True, None, False, project_id='froggy') def test_auth_private_unowned(self): """ Tests that an authenticated context (with is_admin set to False) cannot access an image (which it does not own) with is_public set to False. """ self.do_visible(False, 'pattieblack', False, project_id='froggy') def test_auth_private_owned(self): """ Tests that an authenticated context (with is_admin set to False) can access an image (which it does own) with is_public set to False. 
""" self.do_visible(True, 'pattieblack', False, project_id='pattieblack') def test_request_id(self): contexts = [context.RequestContext().request_id for _ in range(5)] # Check for uniqueness -- set() will normalize its argument self.assertEqual(5, len(set(contexts))) def test_service_catalog(self): ctx = context.RequestContext(service_catalog=['foo']) self.assertEqual(['foo'], ctx.service_catalog) def test_user_identity(self): ctx = context.RequestContext(user_id="user", project_id="tenant", domain_id="domain", user_domain_id="user-domain", project_domain_id="project-domain") self.assertEqual('user tenant domain user-domain project-domain', ctx.to_dict()["user_identity"]) def test_elevated(self): """Make sure we get a whole admin-capable context from elevated().""" ctx = context.RequestContext(service_catalog=['foo'], user_id='dan', project_id='openstack', roles=['member']) admin = ctx.elevated() self.assertEqual('dan', admin.user_id) self.assertEqual('openstack', admin.project_id) self.assertEqual(sorted(['member', 'admin']), sorted(admin.roles)) self.assertEqual(['foo'], admin.service_catalog) self.assertTrue(admin.is_admin) def test_elevated_again(self): """Make sure a second elevation looks the same.""" ctx = context.RequestContext(service_catalog=['foo'], user_id='dan', project_id='openstack', roles=['member']) admin = ctx.elevated() admin = admin.elevated() self.assertEqual('dan', admin.user_id) self.assertEqual('openstack', admin.project_id) self.assertEqual(sorted(['member', 'admin']), sorted(admin.roles)) self.assertEqual(['foo'], admin.service_catalog) self.assertTrue(admin.is_admin) @mock.patch('keystoneauth1.token_endpoint.Token') @mock.patch('keystoneauth1.session.Session') def test_get_ksa_client(self, mock_session, mock_token): # Make sure we can get a keystoneauth1 client from our context # with the token auth as expected. ctx = context.RequestContext(auth_token='token') # NOTE(danms): The auth config group and options are # dynamically registered. Tickling enough of the relevant # code to make that happen would significantly inflate the # amount of code here for no real gain, so we just mock the # CONF object. CONF.register_group(cfg.OptGroup('keystone_authtoken')) with mock.patch.object(CONF, 'keystone_authtoken') as ksat: ksat.identity_uri = 'http://keystone' client = context.get_ksa_client(ctx) self.assertEqual(mock_session.return_value, client) mock_session.assert_called_once_with(auth=mock_token.return_value) mock_token.assert_called_once_with('http://keystone', 'token') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_context_middleware.py0000664000175000017500000001413600000000000023727 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob from glance.api.middleware import context import glance.context from glance.tests.unit import base class TestContextMiddleware(base.IsolatedUnitTest): def _build_request(self, roles=None, identity_status='Confirmed', service_catalog=None): req = webob.Request.blank('/') req.headers['x-auth-token'] = 'token1' req.headers['x-identity-status'] = identity_status req.headers['x-user-id'] = 'user1' req.headers['x-tenant-id'] = 'tenant1' _roles = roles or ['role1', 'role2'] req.headers['x-roles'] = ','.join(_roles) if service_catalog: req.headers['x-service-catalog'] = service_catalog return req def _build_middleware(self): return context.ContextMiddleware(None) def test_header_parsing(self): req = self._build_request() self._build_middleware().process_request(req) self.assertEqual('token1', req.context.auth_token) self.assertEqual('user1', req.context.user_id) self.assertEqual('tenant1', req.context.project_id) self.assertEqual(['role1', 'role2'], req.context.roles) def test_is_admin_flag(self): # is_admin check should look for 'admin' role by default req = self._build_request(roles=['admin', 'role2']) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # without the 'admin' role, is_admin should be False req = self._build_request() self._build_middleware().process_request(req) self.assertFalse(req.context.is_admin) # the admin_role config option was removed in Wallaby from oslo_config.cfg import NoSuchOptError self.assertRaises(NoSuchOptError, self.config, admin_role='role1') def test_roles_case_insensitive(self): # accept role from request req = self._build_request(roles=['Admin', 'role2']) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_roles_stripping(self): # stripping extra spaces in request req = self._build_request(roles=['\trole1']) self._build_middleware().process_request(req) self.assertIn('role1', req.context.roles) def test_anonymous_access_enabled(self): req = self._build_request(identity_status='Nope') self.config(allow_anonymous_access=True) middleware = self._build_middleware() middleware.process_request(req) self.assertIsNone(req.context.auth_token) self.assertIsNone(req.context.user_id) self.assertIsNone(req.context.project_id) self.assertEqual([], req.context.roles) self.assertFalse(req.context.is_admin) self.assertTrue(req.context.read_only) def test_anonymous_access_defaults_to_disabled(self): req = self._build_request(identity_status='Nope') middleware = self._build_middleware() self.assertRaises(webob.exc.HTTPUnauthorized, middleware.process_request, req) def test_service_catalog(self): catalog_json = "[{}]" req = self._build_request(service_catalog=catalog_json) self._build_middleware().process_request(req) self.assertEqual([{}], req.context.service_catalog) def test_invalid_service_catalog(self): catalog_json = "bad json" req = self._build_request(service_catalog=catalog_json) middleware = self._build_middleware() self.assertRaises(webob.exc.HTTPInternalServerError, middleware.process_request, req) def test_response(self): req = self._build_request() req.context = glance.context.RequestContext() request_id = req.context.request_id resp = webob.Response() resp.request = req self._build_middleware().process_response(resp) self.assertEqual(request_id, resp.headers['x-openstack-request-id']) resp_req_id = resp.headers['x-openstack-request-id'] # Validate that request-id do not starts with 'req-req-' if isinstance(resp_req_id, bytes): resp_req_id = resp_req_id.decode('utf-8') 
self.assertFalse(resp_req_id.startswith('req-req-')) self.assertTrue(resp_req_id.startswith('req-')) class TestUnauthenticatedContextMiddleware(base.IsolatedUnitTest): def test_request(self): middleware = context.UnauthenticatedContextMiddleware(None) req = webob.Request.blank('/') middleware.process_request(req) self.assertIsNone(req.context.auth_token) self.assertIsNone(req.context.user_id) self.assertIsNone(req.context.project_id) self.assertEqual([], req.context.roles) self.assertTrue(req.context.is_admin) def test_response(self): middleware = context.UnauthenticatedContextMiddleware(None) req = webob.Request.blank('/') req.context = glance.context.RequestContext() request_id = req.context.request_id resp = webob.Response() resp.request = req middleware.process_response(resp) self.assertEqual(request_id, resp.headers['x-openstack-request-id']) resp_req_id = resp.headers['x-openstack-request-id'] if isinstance(resp_req_id, bytes): resp_req_id = resp_req_id.decode('utf-8') # Validate that request-id do not starts with 'req-req-' self.assertFalse(resp_req_id.startswith('req-req-')) self.assertTrue(resp_req_id.startswith('req-')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_data_migration_framework.py0000664000175000017500000002072000000000000025101 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
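# NOTE: TestContextMiddleware above encodes the keystonemiddleware header
# contract: identity status must be 'Confirmed' (unless anonymous access is
# allowed), roles arrive comma-separated and are stripped of whitespace, and
# the admin check is case-insensitive. A rough self-contained sketch of that
# header-to-context translation follows; it is illustrative only, not the
# middleware's actual code.
def _sketch_headers_to_context(headers):
    """Map keystonemiddleware-style headers to context keyword arguments."""
    if headers.get('x-identity-status') != 'Confirmed':
        raise ValueError('identity not confirmed')
    roles = [r.strip() for r in headers.get('x-roles', '').split(',')
             if r.strip()]
    return {
        'auth_token': headers.get('x-auth-token'),
        'user_id': headers.get('x-user-id'),
        'project_id': headers.get('x-tenant-id'),
        'roles': roles,
        # Matches test_roles_case_insensitive: 'Admin' also grants admin.
        'is_admin': any(r.lower() == 'admin' for r in roles),
    }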
from unittest import mock from glance.db.sqlalchemy.alembic_migrations import data_migrations from glance.tests import utils as test_utils class TestDataMigrationFramework(test_utils.BaseTestCase): @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' '._find_migration_modules') def test_has_pending_migrations_no_migrations(self, mock_find): mock_find.return_value = None self.assertFalse(data_migrations.has_pending_migrations(mock.Mock())) @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' '._find_migration_modules') def test_has_pending_migrations_one_migration_no_pending(self, mock_find): mock_migration1 = mock.Mock() mock_migration1.has_migrations.return_value = False mock_find.return_value = [mock_migration1] self.assertFalse(data_migrations.has_pending_migrations(mock.Mock())) @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' '._find_migration_modules') def test_has_pending_migrations_one_migration_with_pending(self, mock_find): mock_migration1 = mock.Mock() mock_migration1.has_migrations.return_value = True mock_find.return_value = [mock_migration1] self.assertTrue(data_migrations.has_pending_migrations(mock.Mock())) @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' '._find_migration_modules') def test_has_pending_migrations_mult_migration_no_pending(self, mock_find): mock_migration1 = mock.Mock() mock_migration1.has_migrations.return_value = False mock_migration2 = mock.Mock() mock_migration2.has_migrations.return_value = False mock_migration3 = mock.Mock() mock_migration3.has_migrations.return_value = False mock_find.return_value = [mock_migration1, mock_migration2, mock_migration3] self.assertFalse(data_migrations.has_pending_migrations(mock.Mock())) @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' '._find_migration_modules') def test_has_pending_migrations_mult_migration_one_pending(self, mock_find): mock_migration1 = mock.Mock() mock_migration1.has_migrations.return_value = False mock_migration2 = mock.Mock() mock_migration2.has_migrations.return_value = True mock_migration3 = mock.Mock() mock_migration3.has_migrations.return_value = False mock_find.return_value = [mock_migration1, mock_migration2, mock_migration3] self.assertTrue(data_migrations.has_pending_migrations(mock.Mock())) @mock.patch('glance.db.sqlalchemy.alembic_migrations.data_migrations' '._find_migration_modules') def test_has_pending_migrations_mult_migration_some_pending(self, mock_find): mock_migration1 = mock.Mock() mock_migration1.has_migrations.return_value = False mock_migration2 = mock.Mock() mock_migration2.has_migrations.return_value = True mock_migration3 = mock.Mock() mock_migration3.has_migrations.return_value = False mock_migration4 = mock.Mock() mock_migration4.has_migrations.return_value = True mock_find.return_value = [mock_migration1, mock_migration2, mock_migration3, mock_migration4] self.assertTrue(data_migrations.has_pending_migrations(mock.Mock())) @mock.patch('importlib.import_module') @mock.patch('pkgutil.iter_modules') def test_find_migrations(self, mock_iter, mock_import): def fake_iter_modules(blah): yield 'blah', 'zebra01', 'blah' yield 'blah', 'zebra02', 'blah' yield 'blah', 'yellow01', 'blah' yield 'blah', 'xray01', 'blah' yield 'blah', 'wrinkle01', 'blah' mock_iter.side_effect = fake_iter_modules zebra1 = mock.Mock() zebra1.has_migrations.return_value = mock.Mock() zebra1.migrate.return_value = mock.Mock() zebra2 = mock.Mock() zebra2.has_migrations.return_value = mock.Mock() 
zebra2.migrate.return_value = mock.Mock() fake_imported_modules = [zebra1, zebra2] mock_import.side_effect = fake_imported_modules actual = data_migrations._find_migration_modules('zebra') self.assertEqual(2, len(actual)) self.assertEqual(fake_imported_modules, actual) @mock.patch('pkgutil.iter_modules') def test_find_migrations_no_migrations(self, mock_iter): def fake_iter_modules(blah): yield 'blah', 'zebra01', 'blah' yield 'blah', 'yellow01', 'blah' yield 'blah', 'xray01', 'blah' yield 'blah', 'wrinkle01', 'blah' yield 'blah', 'victor01', 'blah' mock_iter.side_effect = fake_iter_modules actual = data_migrations._find_migration_modules('umbrella') self.assertEqual(0, len(actual)) self.assertEqual([], actual) def test_run_migrations(self): zebra1 = mock.Mock() zebra1.has_migrations.return_value = True zebra1.migrate.return_value = 100 zebra2 = mock.Mock() zebra2.has_migrations.return_value = True zebra2.migrate.return_value = 50 migrations = [zebra1, zebra2] engine = mock.Mock() actual = data_migrations._run_migrations(engine, migrations) self.assertEqual(150, actual) zebra1.has_migrations.assert_called_once_with(engine) zebra1.migrate.assert_called_once_with(engine) zebra2.has_migrations.assert_called_once_with(engine) zebra2.migrate.assert_called_once_with(engine) def test_run_migrations_with_one_pending_migration(self): zebra1 = mock.Mock() zebra1.has_migrations.return_value = False zebra1.migrate.return_value = 0 zebra2 = mock.Mock() zebra2.has_migrations.return_value = True zebra2.migrate.return_value = 50 migrations = [zebra1, zebra2] engine = mock.Mock() actual = data_migrations._run_migrations(engine, migrations) self.assertEqual(50, actual) zebra1.has_migrations.assert_called_once_with(engine) zebra1.migrate.assert_not_called() zebra2.has_migrations.assert_called_once_with(engine) zebra2.migrate.assert_called_once_with(engine) def test_run_migrations_with_no_migrations(self): migrations = [] actual = data_migrations._run_migrations(mock.Mock(), migrations) self.assertEqual(0, actual) @mock.patch('glance.db.migration.CURRENT_RELEASE', 'zebra') @mock.patch('importlib.import_module') @mock.patch('pkgutil.iter_modules') def test_migrate(self, mock_iter, mock_import): def fake_iter_modules(blah): yield 'blah', 'zebra01', 'blah' yield 'blah', 'zebra02', 'blah' yield 'blah', 'yellow01', 'blah' yield 'blah', 'xray01', 'blah' yield 'blah', 'xray02', 'blah' mock_iter.side_effect = fake_iter_modules zebra1 = mock.Mock() zebra1.has_migrations.return_value = True zebra1.migrate.return_value = 100 zebra2 = mock.Mock() zebra2.has_migrations.return_value = True zebra2.migrate.return_value = 50 fake_imported_modules = [zebra1, zebra2] mock_import.side_effect = fake_imported_modules engine = mock.Mock() actual = data_migrations.migrate(engine, 'zebra') self.assertEqual(150, actual) zebra1.has_migrations.assert_called_once_with(engine) zebra1.migrate.assert_called_once_with(engine) zebra2.has_migrations.assert_called_once_with(engine) zebra2.migrate.assert_called_once_with(engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_db.py0000664000175000017500000013321000000000000020426 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock import uuid from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import encodeutils from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import timeutils from sqlalchemy import orm as sa_orm from glance.common import crypt from glance.common import exception import glance.context import glance.db from glance.db.sqlalchemy import api import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF CONF.import_opt('metadata_encryption_key', 'glance.common.config') UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' UUID1_LOCATION = 'file:///path/to/image' UUID1_LOCATION_METADATA = {'key': 'value'} UUID3_LOCATION = 'http://somehost.com/place' CHECKSUM = '93264c3edf5972c9f1cb309543d38a5c' CHCKSUM1 = '43264c3edf4972c9f1cb309543d38a55' TASK_ID_1 = 'b3006bd0-461e-4228-88ea-431c14e918b4' TASK_ID_2 = '07b6b562-6770-4c8b-a649-37a515144ce9' TASK_ID_3 = '72d16bb6-4d70-48a5-83fe-14bb842dc737' NODE_REFERENCE_ID_1 = 1 NODE_REFERENCE_ID_2 = 2 def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'is_public': False, 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } if 'visibility' in kwargs: obj.pop('is_public') obj.update(kwargs) return obj def _db_image_member_fixture(image_id, member_id, **kwargs): obj = { 'image_id': image_id, 'member': member_id, } obj.update(kwargs) return obj def _db_task_fixture(task_id, **kwargs): default_datetime = timeutils.utcnow() obj = { 'id': task_id, 'status': kwargs.get('status', 'pending'), 'type': 'import', 'input': kwargs.get('input', {}), 'result': None, 'owner': None, 'image_id': kwargs.get('image_id'), 'user_id': kwargs.get('user_id'), 'request_id': kwargs.get('request_id'), 'message': None, 'expires_at': default_datetime + datetime.timedelta(days=365), 'created_at': default_datetime, 'updated_at': default_datetime, 'deleted_at': None, 'deleted': False } obj.update(kwargs) return obj def _db_node_reference_fixture(node_id, node_url, **kwargs): obj = { 'node_reference_id': node_id, 'node_reference_url': node_url, } obj.update(kwargs) return obj def _db_cached_images_fixture(id, **kwargs): obj = { 'id': id, 'image_id': kwargs.get('image_id'), 'size': kwargs.get('size'), 'hits': kwargs.get('hits') } obj.update(kwargs) return obj class TestImageRepo(test_utils.BaseTestCase): def setUp(self): super(TestImageRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext( user=USER1, tenant=TENANT1) self.image_repo = 
glance.db.ImageRepo(self.context, self.db) self.image_factory = glance.domain.ImageFactory() self._create_images() self._create_image_members() # Centralized cache self._create_node_references() self._create_cached_images() def _create_node_references(self): self.node_references = [ _db_node_reference_fixture(NODE_REFERENCE_ID_1, 'node_url_1'), _db_node_reference_fixture(NODE_REFERENCE_ID_2, 'node_url_2'), ] [self.db.node_reference_create( None, node_reference['node_reference_url'], node_reference_id=node_reference['node_reference_id'] ) for node_reference in self.node_references] def _create_cached_images(self): self.cached_images = [ _db_cached_images_fixture(1, image_id=UUID1, size=256, hits=3), _db_cached_images_fixture(1, image_id=UUID3, size=1024, hits=0) ] [self.db.insert_cache_details( None, 'node_url_1', cached_image['image_id'], cached_image['size'], hits=cached_image['hits'] ) for cached_image in self.cached_images] def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHECKSUM, name='1', size=256, is_public=True, status='active', locations=[{'url': UUID1_LOCATION, 'metadata': UUID1_LOCATION_METADATA, 'status': 'active'}]), _db_fixture(UUID2, owner=TENANT1, checksum=CHCKSUM1, name='2', size=512, is_public=False), _db_fixture(UUID3, owner=TENANT3, checksum=CHCKSUM1, name='3', size=1024, is_public=True, locations=[{'url': UUID3_LOCATION, 'metadata': {}, 'status': 'active'}]), _db_fixture(UUID4, owner=TENANT4, name='4', size=2048), ] [self.db.image_create(None, image) for image in self.images] # Create tasks associated with image self.tasks = [ _db_task_fixture( TASK_ID_1, image_id=UUID1, status='completed', input={ "image_id": UUID1, "import_req": { "method": { "name": "glance-direct" }, "backend": ["fake-store"] }, }, user_id=USER1, request_id='fake-request-id', ), _db_task_fixture( TASK_ID_2, image_id=UUID1, status='completed', input={ "image_id": UUID1, "import_req": { "method": { "name": "copy-image" }, "all_stores": True, "all_stores_must_succeed": False, "backend": ["fake-store", "fake_store_1"] }, }, user_id=USER1, request_id='fake-request-id', ), _db_task_fixture( TASK_ID_3, status='completed', input={ "image_id": UUID2, "import_req": { "method": { "name": "glance-direct" }, "backend": ["fake-store"] }, }, ), ] [self.db.task_create(None, task) for task in self.tasks] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID2, TENANT2), _db_image_member_fixture(UUID2, TENANT3, status='accepted'), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_node_reference_get_by_url(self): node_reference = self.db.node_reference_get_by_url(self.context, 'node_url_1') self.assertEqual(NODE_REFERENCE_ID_1, node_reference['node_reference_id']) def test_node_reference_get_by_url_not_found(self): self.assertRaises(exception.NotFound, self.db.node_reference_get_by_url, self.context, 'garbage_url') def test_get_cached_images(self): # Two images are cached on node 'node_url_1' cached_images = self.db.get_cached_images(self.context, 'node_url_1') self.assertEqual(2, len(cached_images)) # Nothing is cached on node 'node_url_2' cached_images = self.db.get_cached_images(self.context, 'node_url_2') self.assertEqual(0, len(cached_images)) def test_get_hit_count(self): # Hit count will be 3 for image UUID1 self.assertEqual(3, self.db.get_hit_count(self.context, UUID1, 'node_url_1')) # Hit count will be 0 for uncached image 
self.assertEqual(0, self.db.get_hit_count(self.context, UUID2, 'node_url_1')) def test_delete_all_cached_images(self): # Verify that we have images cached cached_images = self.db.get_cached_images(self.context, 'node_url_1') self.assertEqual(2, len(cached_images)) # Delete cached images from node_url_1 self.db.delete_all_cached_images(self.context, 'node_url_1') # Verify that all cached images from node_url_1 are deleted cached_images = self.db.get_cached_images(self.context, 'node_url_1') self.assertEqual(0, len(cached_images)) def test_delete_cached_image(self): # Verify that we have images cached cached_images = self.db.get_cached_images(self.context, 'node_url_1') self.assertEqual(2, len(cached_images)) # Delete cached image from node_url_1 self.db.delete_cached_image(self.context, UUID1, 'node_url_1') # Verify that the given image from node_url_1 is deleted cached_images = self.db.get_cached_images(self.context, 'node_url_1') self.assertEqual(1, len(cached_images)) def test_get_least_recently_accessed(self): recently_accessed = self.db.get_least_recently_accessed( self.context, 'node_url_1') # Verify we only get one image in the response self.assertEqual(UUID1, recently_accessed) def test_is_image_cached_for_node(self): # Verify UUID1 is cached for node_url_1 self.assertTrue(self.db.is_image_cached_for_node( self.context, 'node_url_1', UUID1)) # Verify UUID3 is not cached for node_url_2 self.assertFalse(self.db.is_image_cached_for_node( self.context, 'node_url_2', UUID3)) def test_update_hit_count(self): # Verify UUID1 on node_url_1 has 3 as hit count self.assertEqual(3, self.db.get_hit_count(self.context, UUID1, 'node_url_1')) # Update the hit count of UUID1 self.db.update_hit_count(self.context, UUID1, 'node_url_1') # Verify hit count is now 4 self.assertEqual(4, self.db.get_hit_count(self.context, UUID1, 'node_url_1')) def test_get(self): image = self.image_repo.get(UUID1) self.assertEqual(UUID1, image.image_id) self.assertEqual('1', image.name) self.assertEqual(set(['ping', 'pong']), image.tags) self.assertEqual('public', image.visibility) self.assertEqual('active', image.status) self.assertEqual(256, image.size) self.assertEqual(TENANT1, image.owner) def test_tasks_get_by_image(self): tasks = self.db.tasks_get_by_image(self.context, UUID1) self.assertEqual(2, len(tasks)) for task in tasks: self.assertEqual(USER1, task['user_id']) self.assertEqual('fake-request-id', task['request_id']) self.assertEqual(UUID1, task['image_id']) def test_tasks_get_by_image_not_exists(self): tasks = self.db.tasks_get_by_image(self.context, UUID3) self.assertEqual(0, len(tasks)) def test_location_value(self): image = self.image_repo.get(UUID3) self.assertEqual(UUID3_LOCATION, image.locations[0]['url']) def test_location_data_value(self): image = self.image_repo.get(UUID1) self.assertEqual(UUID1_LOCATION, image.locations[0]['url']) self.assertEqual(UUID1_LOCATION_METADATA, image.locations[0]['metadata']) def test_location_data_exists(self): image = self.image_repo.get(UUID2) self.assertEqual([], image.locations) def test_get_not_found(self): fake_uuid = str(uuid.uuid4()) exc = self.assertRaises(exception.ImageNotFound, self.image_repo.get, fake_uuid) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) def test_get_forbidden(self): self.assertRaises(exception.NotFound, self.image_repo.get, UUID4) def test_list(self): images = self.image_repo.list() image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids) def _do_test_list_status(self, status,
expected): self.context = glance.context.RequestContext( user=USER1, tenant=TENANT3) self.image_repo = glance.db.ImageRepo(self.context, self.db) images = self.image_repo.list(member_status=status) self.assertEqual(expected, len(images)) def test_list_status(self): self._do_test_list_status(None, 3) def test_list_status_pending(self): self._do_test_list_status('pending', 2) def test_list_status_rejected(self): self._do_test_list_status('rejected', 2) def test_list_status_all(self): self._do_test_list_status('all', 3) def test_list_with_marker(self): full_images = self.image_repo.list() full_ids = [i.image_id for i in full_images] marked_images = self.image_repo.list(marker=full_ids[0]) actual_ids = [i.image_id for i in marked_images] self.assertEqual(full_ids[1:], actual_ids) def test_list_with_last_marker(self): images = self.image_repo.list() marked_images = self.image_repo.list(marker=images[-1].image_id) self.assertEqual(0, len(marked_images)) def test_limited_list(self): limited_images = self.image_repo.list(limit=2) self.assertEqual(2, len(limited_images)) def test_list_with_marker_and_limit(self): full_images = self.image_repo.list() full_ids = [i.image_id for i in full_images] marked_images = self.image_repo.list(marker=full_ids[0], limit=1) actual_ids = [i.image_id for i in marked_images] self.assertEqual(full_ids[1:2], actual_ids) def test_list_private_images(self): filters = {'visibility': 'private'} images = self.image_repo.list(filters=filters) self.assertEqual(0, len(images)) def test_list_shared_images(self): filters = {'visibility': 'shared'} images = self.image_repo.list(filters=filters) image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID2]), image_ids) def test_list_shared_images_other_tenant(self): # Create a private image owned by TENANT3 image5 = _db_fixture(uuids.image5, owner=TENANT3, name='5', size=512, is_public=False) self.db.image_create(None, image5) # Get a repo as TENANT3, since it has access to public, # shared, and private images context = glance.context.RequestContext(user=USER1, tenant=TENANT3) image_repo = glance.db.ImageRepo(context, self.db) images = {i.image_id: i for i in image_repo.list()} # No member set for public image UUID1 self.assertIsNone(images[UUID1].member) # Member should be set to our tenant id for shared image UUID2 self.assertEqual(TENANT3, images[UUID2].member) # No member set for private image5 self.assertIsNone(images[uuids.image5].member) def test_list_all_images(self): filters = {'visibility': 'all'} images = self.image_repo.list(filters=filters) image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids) def test_list_with_checksum_filter_single_image(self): filters = {'checksum': CHECKSUM} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(1, len(image_ids)) self.assertEqual([UUID1], image_ids) def test_list_with_checksum_filter_multiple_images(self): filters = {'checksum': CHCKSUM1} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(2, len(image_ids)) self.assertIn(UUID2, image_ids) self.assertIn(UUID3, image_ids) def test_list_with_wrong_checksum(self): WRONG_CHKSUM = 'd2fd42f979e1ed1aafadc7eb9354bff839c858cd' filters = {'checksum': WRONG_CHKSUM} images = self.image_repo.list(filters=filters) self.assertEqual(0, len(images)) def test_list_with_tags_filter_single_tag(self): filters = {'tags': ['ping']} images = self.image_repo.list(filters=filters) 
image_ids = list([i.image_id for i in images]) self.assertEqual(1, len(image_ids)) self.assertEqual([UUID1], image_ids) def test_list_with_tags_filter_multiple_tags(self): filters = {'tags': ['ping', 'pong']} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(1, len(image_ids)) self.assertEqual([UUID1], image_ids) def test_list_with_tags_filter_multiple_tags_and_nonexistent(self): filters = {'tags': ['ping', 'fake']} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(0, len(image_ids)) def test_list_with_wrong_tags(self): filters = {'tags': ['fake']} images = self.image_repo.list(filters=filters) self.assertEqual(0, len(images)) def test_list_public_images(self): filters = {'visibility': 'public'} images = self.image_repo.list(filters=filters) image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID1, UUID3]), image_ids) def test_sorted_list(self): images = self.image_repo.list(sort_key=['size'], sort_dir=['asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID1, UUID2, UUID3], image_ids) def test_sorted_list_with_multiple_keys(self): temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d' image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM, name='1', size=1024, is_public=True, status='active', locations=[{'url': UUID1_LOCATION, 'metadata': UUID1_LOCATION_METADATA, 'status': 'active'}]) self.db.image_create(None, image) images = self.image_repo.list(sort_key=['name', 'size'], sort_dir=['asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID1, temp_id, UUID2, UUID3], image_ids) images = self.image_repo.list(sort_key=['size', 'name'], sort_dir=['asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID1, UUID2, temp_id, UUID3], image_ids) def test_sorted_list_with_multiple_dirs(self): temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d' image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM, name='1', size=1024, is_public=True, status='active', locations=[{'url': UUID1_LOCATION, 'metadata': UUID1_LOCATION_METADATA, 'status': 'active'}]) self.db.image_create(None, image) images = self.image_repo.list(sort_key=['name', 'size'], sort_dir=['asc', 'desc']) image_ids = [i.image_id for i in images] self.assertEqual([temp_id, UUID1, UUID2, UUID3], image_ids) images = self.image_repo.list(sort_key=['name', 'size'], sort_dir=['desc', 'asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID3, UUID2, UUID1, temp_id], image_ids) def test_add_image(self): image = self.image_factory.new_image(name='added image') self.assertEqual(image.updated_at, image.created_at) self.image_repo.add(image) retrieved_image = self.image_repo.get(image.image_id) self.assertEqual('added image', retrieved_image.name) self.assertEqual(image.updated_at, retrieved_image.updated_at) def test_save_image(self): image = self.image_repo.get(UUID1) original_update_time = image.updated_at image.name = 'foo' image.tags = ['king', 'kong'] self.delay_inaccurate_clock() self.image_repo.save(image) current_update_time = image.updated_at self.assertGreater(current_update_time, original_update_time) image = self.image_repo.get(UUID1) self.assertEqual('foo', image.name) self.assertEqual(set(['king', 'kong']), image.tags) self.assertEqual(current_update_time, image.updated_at) def test_save_image_not_found(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID1) image.image_id = fake_uuid exc =
self.assertRaises(exception.ImageNotFound, self.image_repo.save, image) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) def test_save_excludes_atomic_props(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID1) # Try to set the property normally image.extra_properties['os_glance_import_task'] = fake_uuid self.image_repo.save(image) # Expect it was ignored image = self.image_repo.get(UUID1) self.assertNotIn('os_glance_import_task', image.extra_properties) # Set the property atomically self.image_repo.set_property_atomic(image, 'os_glance_import_task', fake_uuid) # Expect it is set image = self.image_repo.get(UUID1) self.assertEqual(fake_uuid, image.extra_properties['os_glance_import_task']) # Try to clobber it image.extra_properties['os_glance_import_task'] = 'foo' self.image_repo.save(image) # Expect it is unchanged image = self.image_repo.get(UUID1) self.assertEqual(fake_uuid, image.extra_properties['os_glance_import_task']) # Try to delete it del image.extra_properties['os_glance_import_task'] self.image_repo.save(image) # Expect it is still present and set accordingly image = self.image_repo.get(UUID1) self.assertEqual(fake_uuid, image.extra_properties['os_glance_import_task']) def test_remove_image(self): image = self.image_repo.get(UUID1) previous_update_time = image.updated_at self.delay_inaccurate_clock() self.image_repo.remove(image) self.assertGreater(image.updated_at, previous_update_time) self.assertRaises(exception.ImageNotFound, self.image_repo.get, UUID1) def test_remove_image_not_found(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID1) image.image_id = fake_uuid exc = self.assertRaises( exception.ImageNotFound, self.image_repo.remove, image) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) def test_restore_image_status(self): image_id = uuid.uuid4() image = _db_fixture(image_id, name='restore_test', size=256, is_public=True, status='pending_delete') self.db.image_create(self.context, image) self.db.image_restore(self.context, image_id) image = self.db.image_get(self.context, image_id) self.assertEqual(image['status'], 'active') def test_restore_image_status_not_found(self): image_id = uuid.uuid4() self.assertRaises(exception.ImageNotFound, self.db.image_restore, self.context, image_id) def test_restore_image_status_not_pending_delete(self): image_id = uuid.uuid4() image = _db_fixture(image_id, name='restore_test', size=256, is_public=True, status='deleted') self.db.image_create(self.context, image) self.assertRaises(exception.Conflict, self.db.image_restore, self.context, image_id) def test_image_set_property_atomic(self): image_id = uuid.uuid4() image = _db_fixture(image_id, name='test') self.assertRaises(exception.ImageNotFound, self.db.image_set_property_atomic, image_id, 'foo', 'bar') self.db.image_create(self.context, image) self.db.image_set_property_atomic(image_id, 'foo', 'bar') image = self.db.image_get(self.context, image_id) self.assertEqual('foo', image['properties'][0]['name']) self.assertEqual('bar', image['properties'][0]['value']) def test_set_property_atomic(self): image = self.image_repo.get(UUID1) self.image_repo.set_property_atomic(image, 'foo', 'bar') image = self.image_repo.get(image.image_id) self.assertEqual({'foo': 'bar'}, image.extra_properties) def test_image_delete_property_atomic(self): image_id = uuid.uuid4() image = _db_fixture(image_id, name='test') self.assertRaises(exception.NotFound, self.db.image_delete_property_atomic, image_id, 'foo', 'bar') 
self.db.image_create(self.context, image) self.db.image_set_property_atomic(image_id, 'foo', 'bar') self.db.image_delete_property_atomic(image_id, 'foo', 'bar') image = self.image_repo.get(image_id) self.assertEqual({}, image.extra_properties) def test_delete_property_atomic(self): image = self.image_repo.get(UUID1) self.image_repo.set_property_atomic(image, 'foo', 'bar') image = self.image_repo.get(image.image_id) self.image_repo.delete_property_atomic(image, 'foo', 'bar') image = self.image_repo.get(image.image_id) self.assertEqual({}, image.extra_properties) class TestEncryptedLocations(test_utils.BaseTestCase): def setUp(self): super(TestEncryptedLocations, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext( user=USER1, tenant=TENANT1) self.image_repo = glance.db.ImageRepo(self.context, self.db) self.image_factory = glance.domain.ImageFactory() self.crypt_key = '0123456789abcdef' self.config(metadata_encryption_key=self.crypt_key) self.foo_bar_location = [{'url': 'foo', 'metadata': {}, 'status': 'active'}, {'url': 'bar', 'metadata': {}, 'status': 'active'}] def test_encrypt_locations_on_add(self): image = self.image_factory.new_image(UUID1) image.locations = self.foo_bar_location self.image_repo.add(image) db_data = self.db.image_get(self.context, UUID1) self.assertNotEqual(db_data['locations'], ['foo', 'bar']) decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, location['url']) for location in db_data['locations']] self.assertEqual([location['url'] for location in self.foo_bar_location], decrypted_locations) def test_encrypt_locations_on_save(self): image = self.image_factory.new_image(UUID1) self.image_repo.add(image) image.locations = self.foo_bar_location self.image_repo.save(image) db_data = self.db.image_get(self.context, UUID1) self.assertNotEqual(db_data['locations'], ['foo', 'bar']) decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, location['url']) for location in db_data['locations']] self.assertEqual([location['url'] for location in self.foo_bar_location], decrypted_locations) def test_decrypt_locations_on_get(self): url_loc = ['ping', 'pong'] orig_locations = [{'url': location, 'metadata': {}, 'status': 'active'} for location in url_loc] encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, location) for location in url_loc] encrypted_locations = [{'url': location, 'metadata': {}, 'status': 'active'} for location in encrypted_locs] self.assertNotEqual(encrypted_locations, orig_locations) db_data = _db_fixture(UUID1, owner=TENANT1, locations=encrypted_locations) self.db.image_create(None, db_data) image = self.image_repo.get(UUID1) self.assertIn('id', image.locations[0]) self.assertIn('id', image.locations[1]) image.locations[0].pop('id') image.locations[1].pop('id') self.assertEqual(orig_locations, image.locations) def test_decrypt_locations_on_list(self): url_loc = ['ping', 'pong'] orig_locations = [{'url': location, 'metadata': {}, 'status': 'active'} for location in url_loc] encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, location) for location in url_loc] encrypted_locations = [{'url': location, 'metadata': {}, 'status': 'active'} for location in encrypted_locs] self.assertNotEqual(encrypted_locations, orig_locations) db_data = _db_fixture(UUID1, owner=TENANT1, locations=encrypted_locations) self.db.image_create(None, db_data) image = self.image_repo.list()[0] self.assertIn('id', image.locations[0]) self.assertIn('id', image.locations[1]) image.locations[0].pop('id') 
image.locations[1].pop('id') self.assertEqual(orig_locations, image.locations) class TestImageMemberRepo(test_utils.BaseTestCase): def setUp(self): super(TestImageMemberRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext( user=USER1, tenant=TENANT1) self.image_repo = glance.db.ImageRepo(self.context, self.db) self.image_member_factory = glance.domain.ImageMemberFactory() self._create_images() self._create_image_members() image = self.image_repo.get(UUID1) self.image_member_repo = glance.db.ImageMemberRepo(self.context, self.db, image) def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, name='1', size=256, status='active'), _db_fixture(UUID2, owner=TENANT1, name='2', size=512, visibility='shared'), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID1, TENANT2), _db_image_member_fixture(UUID1, TENANT3), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_list(self): image_members = self.image_member_repo.list() image_member_ids = set([i.member_id for i in image_members]) self.assertEqual(set([TENANT2, TENANT3]), image_member_ids) def test_list_no_members(self): image = self.image_repo.get(UUID2) self.image_member_repo_uuid2 = glance.db.ImageMemberRepo( self.context, self.db, image) image_members = self.image_member_repo_uuid2.list() image_member_ids = set([i.member_id for i in image_members]) self.assertEqual(set([]), image_member_ids) def test_save_image_member(self): image_member = self.image_member_repo.get(TENANT2) image_member.status = 'accepted' self.image_member_repo.save(image_member) image_member_updated = self.image_member_repo.get(TENANT2) self.assertEqual(image_member.id, image_member_updated.id) self.assertEqual('accepted', image_member_updated.status) def test_add_image_member(self): image = self.image_repo.get(UUID1) image_member = self.image_member_factory.new_image_member(image, TENANT4) self.assertIsNone(image_member.id) self.image_member_repo.add(image_member) retrieved_image_member = self.image_member_repo.get(TENANT4) self.assertIsNotNone(retrieved_image_member.id) self.assertEqual(image_member.image_id, retrieved_image_member.image_id) self.assertEqual(image_member.member_id, retrieved_image_member.member_id) self.assertEqual('pending', retrieved_image_member.status) def test_add_duplicate_image_member(self): image = self.image_repo.get(UUID1) image_member = self.image_member_factory.new_image_member(image, TENANT4) self.assertIsNone(image_member.id) self.image_member_repo.add(image_member) retrieved_image_member = self.image_member_repo.get(TENANT4) self.assertIsNotNone(retrieved_image_member.id) self.assertEqual(image_member.image_id, retrieved_image_member.image_id) self.assertEqual(image_member.member_id, retrieved_image_member.member_id) self.assertEqual('pending', retrieved_image_member.status) self.assertRaises(exception.Duplicate, self.image_member_repo.add, image_member) def test_get_image_member(self): image = self.image_repo.get(UUID1) image_member = self.image_member_factory.new_image_member(image, TENANT4) self.assertIsNone(image_member.id) self.image_member_repo.add(image_member) member = self.image_member_repo.get(image_member.member_id) self.assertEqual(member.id, image_member.id) self.assertEqual(member.image_id, image_member.image_id)
self.assertEqual(member.member_id, image_member.member_id) self.assertEqual('pending', member.status) def test_get_nonexistent_image_member(self): fake_image_member_id = 'fake' self.assertRaises(exception.NotFound, self.image_member_repo.get, fake_image_member_id) def test_remove_image_member(self): image_member = self.image_member_repo.get(TENANT2) self.image_member_repo.remove(image_member) self.assertRaises(exception.NotFound, self.image_member_repo.get, TENANT2) def test_remove_image_member_does_not_exist(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID2) fake_member = glance.domain.ImageMemberFactory().new_image_member( image, TENANT4) fake_member.id = fake_uuid exc = self.assertRaises(exception.NotFound, self.image_member_repo.remove, fake_member) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) class TestTaskRepo(test_utils.BaseTestCase): def setUp(self): super(TestTaskRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext(user=USER1, tenant=TENANT1) self.task_repo = glance.db.TaskRepo(self.context, self.db) self.task_factory = glance.domain.TaskFactory() self.fake_task_input = ('{"import_from": ' '"swift://cloud.foo/account/mycontainer/path"' ',"import_from_format": "qcow2"}') self._create_tasks() def _create_tasks(self): self.tasks = [ _db_task_fixture(UUID1, type='import', status='pending', input=self.fake_task_input, result='', owner=TENANT1, message='', ), _db_task_fixture(UUID2, type='import', status='processing', input=self.fake_task_input, result='', owner=TENANT1, message='', ), _db_task_fixture(UUID3, type='import', status='failure', input=self.fake_task_input, result='', owner=TENANT1, message='', ), _db_task_fixture(UUID4, type='import', status='success', input=self.fake_task_input, result='', owner=TENANT2, message='', ), ] [self.db.task_create(None, task) for task in self.tasks] def test_get(self): task = self.task_repo.get(UUID1) self.assertEqual(task.task_id, UUID1) self.assertEqual('import', task.type) self.assertEqual('pending', task.status) self.assertEqual(task.task_input, self.fake_task_input) self.assertEqual('', task.result) self.assertEqual('', task.message) self.assertEqual(task.owner, TENANT1) def test_get_not_found(self): self.assertRaises(exception.NotFound, self.task_repo.get, str(uuid.uuid4())) def test_get_forbidden(self): self.assertRaises(exception.NotFound, self.task_repo.get, UUID4) def test_list(self): tasks = self.task_repo.list() task_ids = set([i.task_id for i in tasks]) self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids) def test_list_with_type(self): filters = {'type': 'import'} tasks = self.task_repo.list(filters=filters) task_ids = set([i.task_id for i in tasks]) self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids) def test_list_with_status(self): filters = {'status': 'failure'} tasks = self.task_repo.list(filters=filters) task_ids = set([i.task_id for i in tasks]) self.assertEqual(set([UUID3]), task_ids) def test_list_with_marker(self): full_tasks = self.task_repo.list() full_ids = [i.task_id for i in full_tasks] marked_tasks = self.task_repo.list(marker=full_ids[0]) actual_ids = [i.task_id for i in marked_tasks] self.assertEqual(full_ids[1:], actual_ids) def test_list_with_last_marker(self): tasks = self.task_repo.list() marked_tasks = self.task_repo.list(marker=tasks[-1].task_id) self.assertEqual(0, len(marked_tasks)) def test_limited_list(self): limited_tasks = self.task_repo.list(limit=2) self.assertEqual(2, len(limited_tasks)) def 
test_list_with_marker_and_limit(self): full_tasks = self.task_repo.list() full_ids = [i.task_id for i in full_tasks] marked_tasks = self.task_repo.list(marker=full_ids[0], limit=1) actual_ids = [i.task_id for i in marked_tasks] self.assertEqual(full_ids[1:2], actual_ids) def test_sorted_list(self): tasks = self.task_repo.list(sort_key='status', sort_dir='desc') task_ids = [i.task_id for i in tasks] self.assertEqual([UUID2, UUID1, UUID3], task_ids) def test_add_task(self): task_type = 'import' image_id = 'fake_image_id' user_id = 'fake_user' request_id = 'fake_request_id' task = self.task_factory.new_task(task_type, None, image_id, user_id, request_id, task_input=self.fake_task_input) self.assertEqual(task.updated_at, task.created_at) self.task_repo.add(task) retrieved_task = self.task_repo.get(task.task_id) self.assertEqual(task.updated_at, retrieved_task.updated_at) self.assertEqual(self.fake_task_input, retrieved_task.task_input) self.assertEqual(image_id, task.image_id) self.assertEqual(user_id, task.user_id) self.assertEqual(request_id, task.request_id) def test_save_task(self): task = self.task_repo.get(UUID1) original_update_time = task.updated_at self.delay_inaccurate_clock() self.task_repo.save(task) current_update_time = task.updated_at self.assertGreater(current_update_time, original_update_time) task = self.task_repo.get(UUID1) self.assertEqual(current_update_time, task.updated_at) def test_remove_task(self): task = self.task_repo.get(UUID1) self.task_repo.remove(task) self.assertRaises(exception.NotFound, self.task_repo.get, task.task_id) class RetryOnDeadlockTestCase(test_utils.BaseTestCase): def test_raise_deadlock(self): class TestException(Exception): pass self.attempts = 3 def _mock_get_session(): def _raise_exceptions(): self.attempts -= 1 if self.attempts <= 0: raise TestException("Exit") raise db_exc.DBDeadlock("Fake Exception") return _raise_exceptions with mock.patch.object(api, 'get_session') as sess: sess.side_effect = _mock_get_session() try: api.image_update(None, 'fake-id', {}) except TestException: self.assertEqual(3, sess.call_count) # Test retry on image destroy if db deadlock occurs self.attempts = 3 with mock.patch.object(api, 'get_session') as sess: sess.side_effect = _mock_get_session() try: api.image_destroy(None, 'fake-id') except TestException: self.assertEqual(3, sess.call_count) class TestImageDeleteRace(test_utils.BaseTestCase): @mock.patch.object(api, 'get_session') @mock.patch.object(api, 'LOG') def test_image_property_delete_stale_data( self, mock_LOG, mock_get_session, ): mock_context = mock.MagicMock() mock_session = mock_get_session.return_value mock_result = (mock_session.query.return_value. filter_by.return_value. one.return_value) mock_result.delete.side_effect = sa_orm.exc.StaleDataError('myerror') # StaleDataError should not be raised r = api.image_property_delete(mock_context, 'myprop', 'myimage') # We should not get the property back self.assertIsNone(r) # Make sure we logged it mock_LOG.debug.assert_called_once_with( 'StaleDataError while deleting property %(prop)r ' 'from image %(image)r likely means we raced during delete: ' '%(err)s', {'prop': 'myprop', 'image': 'myimage', 'err': 'myerror'}) @mock.patch.object(api, 'get_session') def test_image_property_delete_exception(self, mock_get_session): mock_context = mock.MagicMock() mock_session = mock_get_session.return_value mock_result = (mock_session.query.return_value. filter_by.return_value. 
one.return_value) mock_result.delete.side_effect = RuntimeError # Any other exception should be raised self.assertRaises(RuntimeError, api.image_property_delete, mock_context, 'myprop', 'myimage') glance-29.0.0/glance/tests/unit/test_db_metadef.py # Copyright 2012 OpenStack Foundation. # Copyright 2014 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import encodeutils from glance.common import exception import glance.context import glance.db import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' NAMESPACE1 = 'namespace1' NAMESPACE2 = 'namespace2' NAMESPACE3 = 'namespace3' NAMESPACE4 = 'namespace4' PROPERTY1 = 'Property1' PROPERTY2 = 'Property2' PROPERTY3 = 'Property3' OBJECT1 = 'Object1' OBJECT2 = 'Object2' OBJECT3 = 'Object3' TAG1 = 'Tag1' TAG2 = 'Tag2' TAG3 = 'Tag3' TAG4 = 'Tag4' TAG5 = 'Tag5' RESOURCE_TYPE1 = 'ResourceType1' RESOURCE_TYPE2 = 'ResourceType2' RESOURCE_TYPE3 = 'ResourceType3' def _db_namespace_fixture(**kwargs): namespace = { 'namespace': None, 'display_name': None, 'description': None, 'visibility': True, 'protected': False, 'owner': None } namespace.update(kwargs) return namespace def _db_property_fixture(name, **kwargs): property = { 'name': name, 'json_schema': {"type": "string", "title": "title"}, } property.update(kwargs) return property def _db_object_fixture(name, **kwargs): obj = { 'name': name, 'description': None, 'json_schema': {}, 'required': '[]', } obj.update(kwargs) return obj def _db_tag_fixture(name, **kwargs): obj = { 'name': name } obj.update(kwargs) return obj def _db_tags_fixture(names=None): tags = [] if names: tag_name_list = names else: tag_name_list = [TAG1, TAG2, TAG3] for tag_name in tag_name_list: tags.append(_db_tag_fixture(tag_name)) return tags def _db_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'protected': False, } obj.update(kwargs) return obj def _db_namespace_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'properties_target': None, 'prefix': None, } obj.update(kwargs) return obj class TestMetadefRepo(test_utils.BaseTestCase): def setUp(self): super(TestMetadefRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext(user=USER1, tenant=TENANT1) self.namespace_repo = glance.db.MetadefNamespaceRepo(self.context, self.db) self.property_repo = glance.db.MetadefPropertyRepo(self.context, self.db) self.object_repo = glance.db.MetadefObjectRepo(self.context, self.db) self.tag_repo = glance.db.MetadefTagRepo(self.context, self.db) self.resource_type_repo =
glance.db.MetadefResourceTypeRepo( self.context, self.db) self.namespace_factory = glance.domain.MetadefNamespaceFactory() self.property_factory = glance.domain.MetadefPropertyFactory() self.object_factory = glance.domain.MetadefObjectFactory() self.tag_factory = glance.domain.MetadefTagFactory() self.resource_type_factory = glance.domain.MetadefResourceTypeFactory() self._create_namespaces() self._create_properties() self._create_objects() self._create_tags() self._create_resource_types() def _create_namespaces(self): self.namespaces = [ _db_namespace_fixture(namespace=NAMESPACE1, display_name='1', description='desc1', visibility='private', protected=True, owner=TENANT1), _db_namespace_fixture(namespace=NAMESPACE2, display_name='2', description='desc2', visibility='public', protected=False, owner=TENANT1), _db_namespace_fixture(namespace=NAMESPACE3, display_name='3', description='desc3', visibility='private', protected=True, owner=TENANT3), _db_namespace_fixture(namespace=NAMESPACE4, display_name='4', description='desc4', visibility='public', protected=True, owner=TENANT3) ] [self.db.metadef_namespace_create(None, namespace) for namespace in self.namespaces] def _create_properties(self): self.properties = [ _db_property_fixture(name=PROPERTY1), _db_property_fixture(name=PROPERTY2), _db_property_fixture(name=PROPERTY3) ] [self.db.metadef_property_create(self.context, NAMESPACE1, property) for property in self.properties] [self.db.metadef_property_create(self.context, NAMESPACE4, property) for property in self.properties] def _create_objects(self): self.objects = [ _db_object_fixture(name=OBJECT1, description='desc1'), _db_object_fixture(name=OBJECT2, description='desc2'), _db_object_fixture(name=OBJECT3, description='desc3'), ] [self.db.metadef_object_create(self.context, NAMESPACE1, object) for object in self.objects] [self.db.metadef_object_create(self.context, NAMESPACE4, object) for object in self.objects] def _create_tags(self): self.tags = [ _db_tag_fixture(name=TAG1), _db_tag_fixture(name=TAG2), _db_tag_fixture(name=TAG3), ] [self.db.metadef_tag_create(self.context, NAMESPACE1, tag) for tag in self.tags] [self.db.metadef_tag_create(self.context, NAMESPACE4, tag) for tag in self.tags] def _create_resource_types(self): self.resource_types = [ _db_resource_type_fixture(name=RESOURCE_TYPE1, protected=False), _db_resource_type_fixture(name=RESOURCE_TYPE2, protected=False), _db_resource_type_fixture(name=RESOURCE_TYPE3, protected=True), ] [self.db.metadef_resource_type_create(self.context, resource_type) for resource_type in self.resource_types] def test_get_namespace(self): namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(NAMESPACE1, namespace.namespace) self.assertEqual('desc1', namespace.description) self.assertEqual('1', namespace.display_name) self.assertEqual(TENANT1, namespace.owner) self.assertTrue(namespace.protected) self.assertEqual('private', namespace.visibility) def test_get_namespace_not_found(self): fake_namespace = "fake_namespace" exc = self.assertRaises(exception.NotFound, self.namespace_repo.get, fake_namespace) self.assertIn(fake_namespace, encodeutils.exception_to_unicode(exc)) def test_get_namespace_forbidden(self): self.assertRaises(exception.MetadefForbidden, self.namespace_repo.get, NAMESPACE3) def test_list_namespace(self): namespaces = self.namespace_repo.list() namespace_names = set([n.namespace for n in namespaces]) self.assertEqual(set([NAMESPACE1, NAMESPACE2, NAMESPACE4]), namespace_names) def test_list_private_namespaces(self): filters = 
{'visibility': 'private'} namespaces = self.namespace_repo.list(filters=filters) namespace_names = set([n.namespace for n in namespaces]) self.assertEqual(set([NAMESPACE1]), namespace_names) def test_add_namespace(self): # NOTE(pawel-koniszewski): Change db_namespace_fixture to # namespace_factory when namespace primary key in DB # will be changed from Integer to UUID namespace = _db_namespace_fixture(namespace='added_namespace', display_name='fake', description='fake_desc', visibility='public', protected=True, owner=TENANT1) self.assertEqual('added_namespace', namespace['namespace']) self.db.metadef_namespace_create(None, namespace) retrieved_namespace = self.namespace_repo.get(namespace['namespace']) self.assertEqual('added_namespace', retrieved_namespace.namespace) def test_save_namespace(self): namespace = self.namespace_repo.get(NAMESPACE1) namespace.display_name = 'save_name' namespace.description = 'save_desc' self.namespace_repo.save(namespace) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual('save_name', namespace.display_name) self.assertEqual('save_desc', namespace.description) def test_remove_namespace(self): namespace = self.namespace_repo.get(NAMESPACE1) self.namespace_repo.remove(namespace) self.assertRaises(exception.NotFound, self.namespace_repo.get, NAMESPACE1) def test_remove_namespace_not_found(self): fake_name = 'fake_name' namespace = self.namespace_repo.get(NAMESPACE1) namespace.namespace = fake_name exc = self.assertRaises(exception.NotFound, self.namespace_repo.remove, namespace) self.assertIn(fake_name, encodeutils.exception_to_unicode(exc)) def test_get_property(self): property = self.property_repo.get(NAMESPACE1, PROPERTY1) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(PROPERTY1, property.name) self.assertEqual(namespace.namespace, property.namespace.namespace) def test_get_property_not_found(self): exc = self.assertRaises(exception.NotFound, self.property_repo.get, NAMESPACE2, PROPERTY1) self.assertIn(PROPERTY1, encodeutils.exception_to_unicode(exc)) def test_list_property(self): properties = self.property_repo.list(filters={'namespace': NAMESPACE1}) property_names = set([p.name for p in properties]) self.assertEqual(set([PROPERTY1, PROPERTY2, PROPERTY3]), property_names) def test_list_property_empty_result(self): properties = self.property_repo.list(filters={'namespace': NAMESPACE2}) property_names = set([p.name for p in properties]) self.assertEqual(set([]), property_names) def test_list_property_namespace_not_found(self): exc = self.assertRaises(exception.NotFound, self.property_repo.list, filters={'namespace': 'not-a-namespace'}) self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) def test_add_property(self): # NOTE(pawel-koniszewski): Change db_property_fixture to # property_factory when property primary key in DB # will be changed from Integer to UUID property = _db_property_fixture(name='added_property') self.assertEqual('added_property', property['name']) self.db.metadef_property_create(self.context, NAMESPACE1, property) retrieved_property = self.property_repo.get(NAMESPACE1, 'added_property') self.assertEqual('added_property', retrieved_property.name) def test_add_property_namespace_forbidden(self): # NOTE(pawel-koniszewski): Change db_property_fixture to # property_factory when property primary key in DB # will be changed from Integer to UUID property = _db_property_fixture(name='added_property') self.assertEqual('added_property', property['name']) self.assertRaises(exception.Forbidden, 
self.db.metadef_property_create, self.context, NAMESPACE3, property) def test_add_property_namespace_not_found(self): # NOTE(pawel-koniszewski): Change db_property_fixture to # property_factory when property primary key in DB # will be changed from Integer to UUID property = _db_property_fixture(name='added_property') self.assertEqual('added_property', property['name']) self.assertRaises(exception.NotFound, self.db.metadef_property_create, self.context, 'not_a_namespace', property) def test_save_property(self): property = self.property_repo.get(NAMESPACE1, PROPERTY1) property.schema = '{"save": "schema"}' self.property_repo.save(property) property = self.property_repo.get(NAMESPACE1, PROPERTY1) self.assertEqual(PROPERTY1, property.name) self.assertEqual('{"save": "schema"}', property.schema) def test_remove_property(self): property = self.property_repo.get(NAMESPACE1, PROPERTY1) self.property_repo.remove(property) self.assertRaises(exception.NotFound, self.property_repo.get, NAMESPACE1, PROPERTY1) def test_remove_property_not_found(self): fake_name = 'fake_name' property = self.property_repo.get(NAMESPACE1, PROPERTY1) property.name = fake_name self.assertRaises(exception.NotFound, self.property_repo.remove, property) def test_get_object(self): object = self.object_repo.get(NAMESPACE1, OBJECT1) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(OBJECT1, object.name) self.assertEqual('desc1', object.description) self.assertEqual(['[]'], object.required) self.assertEqual({}, object.properties) self.assertEqual(namespace.namespace, object.namespace.namespace) def test_get_object_not_found(self): exc = self.assertRaises(exception.NotFound, self.object_repo.get, NAMESPACE2, OBJECT1) self.assertIn(OBJECT1, encodeutils.exception_to_unicode(exc)) def test_list_object(self): objects = self.object_repo.list(filters={'namespace': NAMESPACE1}) object_names = set([o.name for o in objects]) self.assertEqual(set([OBJECT1, OBJECT2, OBJECT3]), object_names) def test_list_object_empty_result(self): objects = self.object_repo.list(filters={'namespace': NAMESPACE2}) object_names = set([o.name for o in objects]) self.assertEqual(set([]), object_names) def test_list_object_namespace_not_found(self): exc = self.assertRaises(exception.NotFound, self.object_repo.list, filters={'namespace': 'not-a-namespace'}) self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) def test_add_object(self): # NOTE(pawel-koniszewski): Change db_object_fixture to # object_factory when object primary key in DB # will be changed from Integer to UUID object = _db_object_fixture(name='added_object') self.assertEqual('added_object', object['name']) self.db.metadef_object_create(self.context, NAMESPACE1, object) retrieved_object = self.object_repo.get(NAMESPACE1, 'added_object') self.assertEqual('added_object', retrieved_object.name) def test_add_object_namespace_forbidden(self): # NOTE(pawel-koniszewski): Change db_object_fixture to # object_factory when object primary key in DB # will be changed from Integer to UUID object = _db_object_fixture(name='added_object') self.assertEqual('added_object', object['name']) self.assertRaises(exception.Forbidden, self.db.metadef_object_create, self.context, NAMESPACE3, object) def test_add_object_namespace_not_found(self): # NOTE(pawel-koniszewski): Change db_object_fixture to # object_factory when object primary key in DB # will be changed from Integer to UUID object = _db_object_fixture(name='added_object') self.assertEqual('added_object', object['name']) 
self.assertRaises(exception.NotFound, self.db.metadef_object_create, self.context, 'not-a-namespace', object) def test_save_object(self): object = self.object_repo.get(NAMESPACE1, OBJECT1) object.required = ['save_req'] object.description = 'save_desc' self.object_repo.save(object) object = self.object_repo.get(NAMESPACE1, OBJECT1) self.assertEqual(OBJECT1, object.name) self.assertEqual(['save_req'], object.required) self.assertEqual('save_desc', object.description) def test_remove_object(self): object = self.object_repo.get(NAMESPACE1, OBJECT1) self.object_repo.remove(object) self.assertRaises(exception.NotFound, self.object_repo.get, NAMESPACE1, OBJECT1) def test_remove_object_not_found(self): fake_name = 'fake_name' object = self.object_repo.get(NAMESPACE1, OBJECT1) object.name = fake_name self.assertRaises(exception.NotFound, self.object_repo.remove, object) def test_list_resource_type(self): resource_type = self.resource_type_repo.list( filters={'namespace': NAMESPACE1}) self.assertEqual(0, len(resource_type)) def test_get_tag(self): tag = self.tag_repo.get(NAMESPACE1, TAG1) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(TAG1, tag.name) self.assertEqual(namespace.namespace, tag.namespace.namespace) def test_get_tag_not_found(self): exc = self.assertRaises(exception.NotFound, self.tag_repo.get, NAMESPACE2, TAG1) self.assertIn(TAG1, encodeutils.exception_to_unicode(exc)) def test_list_tag(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) def test_list_tag_empty_result(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE2}) tag_names = set([t.name for t in tags]) self.assertEqual(set([]), tag_names) def test_list_tag_namespace_not_found(self): exc = self.assertRaises(exception.NotFound, self.tag_repo.list, filters={'namespace': 'not-a-namespace'}) self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) def test_add_tag(self): # NOTE(pawel-koniszewski): Change db_tag_fixture to # tag_factory when tag primary key in DB # will be changed from Integer to UUID tag = _db_tag_fixture(name='added_tag') self.assertEqual('added_tag', tag['name']) self.db.metadef_tag_create(self.context, NAMESPACE1, tag) retrieved_tag = self.tag_repo.get(NAMESPACE1, 'added_tag') self.assertEqual('added_tag', retrieved_tag.name) def test_add_tags(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) tags = _db_tags_fixture([TAG3, TAG4, TAG5]) self.db.metadef_tag_create_tags(self.context, NAMESPACE1, tags) tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG3, TAG4, TAG5]), tag_names) def test_add_tags_with_append_true(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) tags = _db_tags_fixture([TAG4, TAG5]) self.db.metadef_tag_create_tags(self.context, NAMESPACE1, tags, can_append=True) tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3, TAG4, TAG5]), tag_names) def test_add_duplicate_tags_with_pre_existing_tags(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) tags = 
_db_tags_fixture([TAG5, TAG4, TAG5]) self.assertRaises(exception.Duplicate, self.db.metadef_tag_create_tags, self.context, NAMESPACE1, tags) tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) def test_add_tag_namespace_forbidden(self): # NOTE(pawel-koniszewski): Change db_tag_fixture to # tag_factory when tag primary key in DB # will be changed from Integer to UUID tag = _db_tag_fixture(name='added_tag') self.assertEqual('added_tag', tag['name']) self.assertRaises(exception.Forbidden, self.db.metadef_tag_create, self.context, NAMESPACE3, tag) def test_add_tag_namespace_not_found(self): # NOTE(pawel-koniszewski): Change db_tag_fixture to # tag_factory when tag primary key in DB # will be changed from Integer to UUID tag = _db_tag_fixture(name='added_tag') self.assertEqual('added_tag', tag['name']) self.assertRaises(exception.NotFound, self.db.metadef_tag_create, self.context, 'not-a-namespace', tag) def test_save_tag(self): tag = self.tag_repo.get(NAMESPACE1, TAG1) self.tag_repo.save(tag) tag = self.tag_repo.get(NAMESPACE1, TAG1) self.assertEqual(TAG1, tag.name) def test_remove_tag(self): tag = self.tag_repo.get(NAMESPACE1, TAG1) self.tag_repo.remove(tag) self.assertRaises(exception.NotFound, self.tag_repo.get, NAMESPACE1, TAG1) def test_remove_tag_not_found(self): fake_name = 'fake_name' tag = self.tag_repo.get(NAMESPACE1, TAG1) tag.name = fake_name self.assertRaises(exception.NotFound, self.tag_repo.remove, tag) glance-29.0.0/glance/tests/unit/test_domain.py # Copyright 2012 OpenStack Foundation. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
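# The tests below exercise the glance.domain entity classes directly, without
# going through the REST or DB layers. As a quick orientation, here is a
# minimal usage sketch of the factory API they rely on, based only on the
# defaults asserted in TestImageFactory.test_minimal_new_image below; it is
# illustrative and not executed as part of this module:
#
#     from glance import domain
#
#     factory = domain.ImageFactory()
#     image = factory.new_image()          # no image_id given, one is generated
#     assert image.status == 'queued'      # new images start out queued
#     assert image.visibility == 'shared'  # default visibility
#     assert image.tags == set()           # tags are always exposed as a set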
import datetime from unittest import mock import uuid from oslo_config import cfg import oslo_utils.importutils import glance.async_ from glance.async_ import taskflow_executor from glance.common import exception from glance.common import timeutils from glance import domain import glance.tests.utils as test_utils CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestImageFactory(test_utils.BaseTestCase): def setUp(self): super(TestImageFactory, self).setUp() self.image_factory = domain.ImageFactory() def test_minimal_new_image(self): image = self.image_factory.new_image() self.assertIsNotNone(image.image_id) self.assertIsNotNone(image.created_at) self.assertEqual(image.created_at, image.updated_at) self.assertEqual('queued', image.status) self.assertEqual('shared', image.visibility) self.assertIsNone(image.owner) self.assertIsNone(image.name) self.assertIsNone(image.size) self.assertEqual(0, image.min_disk) self.assertEqual(0, image.min_ram) self.assertFalse(image.protected) self.assertIsNone(image.disk_format) self.assertIsNone(image.container_format) self.assertEqual({}, image.extra_properties) self.assertEqual(set([]), image.tags) def test_new_image(self): image = self.image_factory.new_image( image_id=UUID1, name='image-1', min_disk=256, owner=TENANT1) self.assertEqual(UUID1, image.image_id) self.assertIsNotNone(image.created_at) self.assertEqual(image.created_at, image.updated_at) self.assertEqual('queued', image.status) self.assertEqual('shared', image.visibility) self.assertEqual(TENANT1, image.owner) self.assertEqual('image-1', image.name) self.assertIsNone(image.size) self.assertEqual(256, image.min_disk) self.assertEqual(0, image.min_ram) self.assertFalse(image.protected) self.assertIsNone(image.disk_format) self.assertIsNone(image.container_format) self.assertEqual({}, image.extra_properties) self.assertEqual(set([]), image.tags) def test_new_image_with_extra_properties_and_tags(self): extra_properties = {'foo': 'bar'} tags = ['one', 'two'] image = self.image_factory.new_image( image_id=UUID1, name='image-1', extra_properties=extra_properties, tags=tags) self.assertEqual(UUID1, image.image_id, UUID1) self.assertIsNotNone(image.created_at) self.assertEqual(image.created_at, image.updated_at) self.assertEqual('queued', image.status) self.assertEqual('shared', image.visibility) self.assertIsNone(image.owner) self.assertEqual('image-1', image.name) self.assertIsNone(image.size) self.assertEqual(0, image.min_disk) self.assertEqual(0, image.min_ram) self.assertFalse(image.protected) self.assertIsNone(image.disk_format) self.assertIsNone(image.container_format) self.assertEqual({'foo': 'bar'}, image.extra_properties) self.assertEqual(set(['one', 'two']), image.tags) def test_new_image_read_only_property(self): self.assertRaises(exception.ReadonlyProperty, self.image_factory.new_image, image_id=UUID1, name='image-1', size=256) def test_new_image_unexpected_property(self): self.assertRaises(TypeError, self.image_factory.new_image, image_id=UUID1, image_name='name-1') def test_new_image_reserved_property(self): extra_properties = {'deleted': True} self.assertRaises(exception.ReservedProperty, self.image_factory.new_image, image_id=UUID1, extra_properties=extra_properties) def test_new_image_for_is_public(self): extra_prop = {'is_public': True} new_image = self.image_factory.new_image(image_id=UUID1, extra_properties=extra_prop) self.assertEqual(True, new_image.extra_properties['is_public']) class 
TestImage(test_utils.BaseTestCase): def setUp(self): super(TestImage, self).setUp() self.image_factory = domain.ImageFactory() self.image = self.image_factory.new_image( container_format='bear', disk_format='rawr') def test_extra_properties(self): self.image.extra_properties = {'foo': 'bar'} self.assertEqual({'foo': 'bar'}, self.image.extra_properties) def test_extra_properties_assign(self): self.image.extra_properties['foo'] = 'bar' self.assertEqual({'foo': 'bar'}, self.image.extra_properties) def test_delete_extra_properties(self): self.image.extra_properties = {'foo': 'bar'} self.assertEqual({'foo': 'bar'}, self.image.extra_properties) del self.image.extra_properties['foo'] self.assertEqual({}, self.image.extra_properties) def test_visibility_enumerated(self): self.image.visibility = 'public' self.image.visibility = 'private' self.image.visibility = 'shared' self.image.visibility = 'community' self.assertRaises(ValueError, setattr, self.image, 'visibility', 'ellison') def test_tags_always_a_set(self): self.image.tags = ['a', 'b', 'c'] self.assertEqual(set(['a', 'b', 'c']), self.image.tags) def test_delete_protected_image(self): self.image.protected = True self.assertRaises(exception.ProtectedImageDelete, self.image.delete) def test_status_saving(self): self.image.status = 'saving' self.assertEqual('saving', self.image.status) def test_set_incorrect_status(self): self.image.status = 'saving' self.image.status = 'killed' self.assertRaises( exception.InvalidImageStatusTransition, setattr, self.image, 'status', 'delet') def test_status_saving_without_disk_format(self): self.image.disk_format = None self.assertRaises(ValueError, setattr, self.image, 'status', 'saving') def test_status_saving_without_container_format(self): self.image.container_format = None self.assertRaises(ValueError, setattr, self.image, 'status', 'saving') def test_status_active_without_disk_format(self): self.image.disk_format = None self.assertRaises(ValueError, setattr, self.image, 'status', 'active') def test_status_active_without_container_format(self): self.image.container_format = None self.assertRaises(ValueError, setattr, self.image, 'status', 'active') def test_delayed_delete(self): self.config(delayed_delete=True) self.image.status = 'active' self.image.locations = [{'url': 'http://foo.bar/not.exists', 'metadata': {}}] self.assertEqual('active', self.image.status) self.image.delete() self.assertEqual('pending_delete', self.image.status) class TestImageMember(test_utils.BaseTestCase): def setUp(self): super(TestImageMember, self).setUp() self.image_member_factory = domain.ImageMemberFactory() self.image_factory = domain.ImageFactory() self.image = self.image_factory.new_image() self.image_member = self.image_member_factory.new_image_member( image=self.image, member_id=TENANT1) def test_status_enumerated(self): self.image_member.status = 'pending' self.image_member.status = 'accepted' self.image_member.status = 'rejected' self.assertRaises(ValueError, setattr, self.image_member, 'status', 'ellison') class TestImageMemberFactory(test_utils.BaseTestCase): def setUp(self): super(TestImageMemberFactory, self).setUp() self.image_member_factory = domain.ImageMemberFactory() self.image_factory = domain.ImageFactory() def test_minimal_new_image_member(self): member_id = 'fake-member-id' image = self.image_factory.new_image( image_id=UUID1, name='image-1', min_disk=256, owner=TENANT1) image_member = self.image_member_factory.new_image_member(image, member_id) self.assertEqual(image_member.image_id, image.image_id) 
self.assertIsNotNone(image_member.created_at) self.assertEqual(image_member.created_at, image_member.updated_at) self.assertEqual('pending', image_member.status) self.assertIsNotNone(image_member.member_id) class TestExtraProperties(test_utils.BaseTestCase): def test_getitem(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) self.assertEqual('bar', extra_properties['foo']) self.assertEqual('golden', extra_properties['snitch']) def test_getitem_with_no_items(self): extra_properties = domain.ExtraProperties() self.assertRaises(KeyError, extra_properties.__getitem__, 'foo') def test_setitem(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) extra_properties['foo'] = 'baz' self.assertEqual('baz', extra_properties['foo']) def test_delitem(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) del extra_properties['foo'] self.assertRaises(KeyError, extra_properties.__getitem__, 'foo') self.assertEqual('golden', extra_properties['snitch']) def test_len_with_zero_items(self): extra_properties = domain.ExtraProperties() self.assertEqual(0, len(extra_properties)) def test_len_with_non_zero_items(self): extra_properties = domain.ExtraProperties() extra_properties['foo'] = 'bar' extra_properties['snitch'] = 'golden' self.assertEqual(2, len(extra_properties)) def test_eq_with_a_dict(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) ref_extra_properties = {'foo': 'bar', 'snitch': 'golden'} self.assertEqual(ref_extra_properties, extra_properties) def test_eq_with_an_object_of_ExtraProperties(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) ref_extra_properties = domain.ExtraProperties() ref_extra_properties['snitch'] = 'golden' ref_extra_properties['foo'] = 'bar' self.assertEqual(ref_extra_properties, extra_properties) def test_eq_with_unequal_dict(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) ref_extra_properties = {'boo': 'far', 'gnitch': 'solden'} self.assertNotEqual(ref_extra_properties, extra_properties) def test_eq_with_unequal_ExtraProperties_object(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) ref_extra_properties = domain.ExtraProperties() ref_extra_properties['gnitch'] = 'solden' ref_extra_properties['boo'] = 'far' self.assertNotEqual(ref_extra_properties, extra_properties) def test_eq_with_incompatible_object(self): a_dict = {'foo': 'bar', 'snitch': 'golden'} extra_properties = domain.ExtraProperties(a_dict) random_list = ['foo', 'bar'] self.assertNotEqual(random_list, extra_properties) class TestTaskFactory(test_utils.BaseTestCase): def setUp(self): super(TestTaskFactory, self).setUp() self.task_factory = domain.TaskFactory() def test_new_task(self): task_type = 'import' owner = TENANT1 task_input = 'input' image_id = 'fake_image_id' user_id = 'fake_user' request_id = 'fake_request_id' task = self.task_factory.new_task(task_type, owner, image_id, user_id, request_id, task_input=task_input, result='test_result', message='test_message') self.assertIsNotNone(task.task_id) self.assertIsNotNone(task.created_at) self.assertEqual(task_type, task.type) self.assertEqual(task.created_at, task.updated_at) self.assertEqual('pending', task.status) self.assertIsNone(task.expires_at) self.assertEqual(owner, task.owner) self.assertEqual(task_input,
task.task_input) self.assertEqual('test_message', task.message) self.assertEqual('test_result', task.result) self.assertEqual(image_id, task.image_id) self.assertEqual(user_id, task.user_id) self.assertEqual(request_id, task.request_id) def test_new_task_invalid_type(self): task_type = 'blah' image_id = 'fake_image_id' user_id = 'fake_user' request_id = 'fake_request_id' owner = TENANT1 self.assertRaises( exception.InvalidTaskType, self.task_factory.new_task, task_type, owner, image_id, user_id, request_id ) class TestTask(test_utils.BaseTestCase): def setUp(self): super(TestTask, self).setUp() self.task_factory = domain.TaskFactory() task_type = 'import' image_id = 'fake_image_id' user_id = 'fake_user' request_id = 'fake_request_id' owner = TENANT1 task_ttl = CONF.task.task_time_to_live self.task = self.task_factory.new_task(task_type, owner, image_id, user_id, request_id, task_time_to_live=task_ttl) def test_task_invalid_status(self): task_id = str(uuid.uuid4()) status = 'blah' self.assertRaises( exception.InvalidTaskStatus, domain.Task, task_id, task_type='import', status=status, owner=None, image_id='fake_image_id', user_id='fake_user', request_id='fake_request_id', expires_at=None, created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), task_input=None, message=None, result=None ) def test_validate_status_transition_from_pending(self): self.task.begin_processing() self.assertEqual('processing', self.task.status) def test_validate_status_transition_from_processing_to_success(self): self.task.begin_processing() self.task.succeed('') self.assertEqual('success', self.task.status) def test_validate_status_transition_from_processing_to_failure(self): self.task.begin_processing() self.task.fail('') self.assertEqual('failure', self.task.status) def test_invalid_status_transitions_from_pending(self): # test do not allow transition from pending to success self.assertRaises( exception.InvalidTaskStatusTransition, self.task.succeed, '' ) def test_invalid_status_transitions_from_success(self): # test do not allow transition from success to processing self.task.begin_processing() self.task.succeed('') self.assertRaises( exception.InvalidTaskStatusTransition, self.task.begin_processing ) # test do not allow transition from success to failure self.assertRaises( exception.InvalidTaskStatusTransition, self.task.fail, '' ) def test_invalid_status_transitions_from_failure(self): # test do not allow transition from failure to processing self.task.begin_processing() self.task.fail('') self.assertRaises( exception.InvalidTaskStatusTransition, self.task.begin_processing ) # test do not allow transition from failure to success self.assertRaises( exception.InvalidTaskStatusTransition, self.task.succeed, '' ) def test_begin_processing(self): self.task.begin_processing() self.assertEqual('processing', self.task.status) @mock.patch.object(timeutils, 'utcnow') def test_succeed(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime.utcnow() self.task.begin_processing() self.task.succeed('{"location": "file://home"}') self.assertEqual('success', self.task.status) self.assertEqual('{"location": "file://home"}', self.task.result) self.assertEqual(u'', self.task.message) expected = (timeutils.utcnow() + datetime.timedelta(hours=CONF.task.task_time_to_live)) self.assertEqual( expected, self.task.expires_at ) @mock.patch.object(timeutils, 'utcnow') def test_fail(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime.utcnow() self.task.begin_processing() self.task.fail('{"message": "connection 
failed"}') self.assertEqual('failure', self.task.status) self.assertEqual('{"message": "connection failed"}', self.task.message) self.assertIsNone(self.task.result) expected = (timeutils.utcnow() + datetime.timedelta(hours=CONF.task.task_time_to_live)) self.assertEqual( expected, self.task.expires_at ) @mock.patch.object(glance.async_.TaskExecutor, 'begin_processing') def test_run(self, mock_begin_processing): executor = glance.async_.TaskExecutor(context=mock.ANY, task_repo=mock.ANY, image_repo=mock.ANY, image_factory=mock.ANY) self.task.run(executor) mock_begin_processing.assert_called_once_with(self.task.task_id) class TestTaskStub(test_utils.BaseTestCase): def setUp(self): super(TestTaskStub, self).setUp() self.task_id = str(uuid.uuid4()) self.task_type = 'import' self.owner = TENANT1 self.task_ttl = CONF.task.task_time_to_live self.image_id = 'fake_image_id' self.user_id = 'fake_user' self.request_id = 'fake_request_id' def test_task_stub_init(self): self.task_factory = domain.TaskFactory() task = domain.TaskStub( self.task_id, self.task_type, 'status', self.owner, 'expires_at', 'created_at', 'updated_at', self.image_id, self.user_id, self.request_id, ) self.assertEqual(self.task_id, task.task_id) self.assertEqual(self.task_type, task.type) self.assertEqual(self.owner, task.owner) self.assertEqual('status', task.status) self.assertEqual('expires_at', task.expires_at) self.assertEqual('created_at', task.created_at) self.assertEqual('updated_at', task.updated_at) self.assertEqual(self.image_id, task.image_id) self.assertEqual(self.user_id, task.user_id) self.assertEqual(self.request_id, task.request_id) def test_task_stub_get_status(self): status = 'pending' task = domain.TaskStub( self.task_id, self.task_type, status, self.owner, 'expires_at', 'created_at', 'updated_at', self.image_id, self.user_id, self.request_id, ) self.assertEqual(status, task.status) class TestTaskExecutorFactory(test_utils.BaseTestCase): def setUp(self): super(TestTaskExecutorFactory, self).setUp() self.task_repo = mock.Mock() self.image_repo = mock.Mock() self.image_factory = mock.Mock() def test_init(self): task_executor_factory = domain.TaskExecutorFactory(self.task_repo, self.image_repo, self.image_factory) self.assertEqual(self.task_repo, task_executor_factory.task_repo) def test_new_task_executor(self): task_executor_factory = domain.TaskExecutorFactory(self.task_repo, self.image_repo, self.image_factory) context = mock.Mock() with mock.patch.object(oslo_utils.importutils, 'import_class') as mock_import_class: mock_executor = mock.Mock() mock_import_class.return_value = mock_executor task_executor_factory.new_task_executor(context) mock_executor.assert_called_once_with(context, self.task_repo, self.image_repo, self.image_factory, admin_repo=None) def test_new_task_executor_with_admin(self): admin_repo = mock.MagicMock() task_executor_factory = domain.TaskExecutorFactory( self.task_repo, self.image_repo, self.image_factory, admin_repo=admin_repo) context = mock.Mock() with mock.patch.object(oslo_utils.importutils, 'import_class') as mock_import_class: mock_executor = mock.Mock() mock_import_class.return_value = mock_executor task_executor_factory.new_task_executor(context) mock_executor.assert_called_once_with(context, self.task_repo, self.image_repo, self.image_factory, admin_repo=admin_repo) def test_new_task_executor_error(self): task_executor_factory = domain.TaskExecutorFactory(self.task_repo, self.image_repo, self.image_factory) context = mock.Mock() with mock.patch.object(oslo_utils.importutils, 
'import_class') as mock_import_class: mock_import_class.side_effect = ImportError self.assertRaises(ImportError, task_executor_factory.new_task_executor, context) def test_new_task_eventlet_backwards_compatibility(self): context = mock.MagicMock() self.config(task_executor='eventlet', group='task') task_executor_factory = domain.TaskExecutorFactory(self.task_repo, self.image_repo, self.image_factory) # NOTE(flaper87): "eventlet" executor. short name to avoid > 79. te_evnt = task_executor_factory.new_task_executor(context) self.assertIsInstance(te_evnt, taskflow_executor.TaskExecutor)

glance-29.0.0/glance/tests/unit/test_domain_proxy.py

# Copyright 2013 OpenStack Foundation. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from glance.domain import proxy import glance.tests.utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class FakeProxy(object): def __init__(self, base, *args, **kwargs): self.base = base self.args = args self.kwargs = kwargs class FakeRepo(object): def __init__(self, result=None): self.args = None self.kwargs = None self.result = result def fake_method(self, *args, **kwargs): self.args = args self.kwargs = kwargs return self.result get = fake_method list = fake_method add = fake_method save = fake_method remove = fake_method set_property_atomic = fake_method delete_property_atomic = fake_method class TestProxyRepoPlain(test_utils.BaseTestCase): def setUp(self): super(TestProxyRepoPlain, self).setUp() self.fake_repo = FakeRepo() self.proxy_repo = proxy.Repo(self.fake_repo) def _test_method(self, name, base_result, *args, **kwargs): self.fake_repo.result = base_result method = getattr(self.proxy_repo, name) proxy_result = method(*args, **kwargs) self.assertEqual(base_result, proxy_result) self.assertEqual(args, self.fake_repo.args) self.assertEqual(kwargs, self.fake_repo.kwargs) def test_get(self): self._test_method('get', 'snarf', 'abcd') def test_list(self): self._test_method('list', ['sniff', 'snarf'], 2, filter='^sn') def test_add(self): self._test_method('add', 'snuff', 'enough') def test_save(self): self._test_method('save', 'snuff', 'enough', from_state=None) def test_remove(self): self._test_method('remove', None, 'flying') def test_set_property_atomic(self): image = mock.MagicMock() image.image_id = 'foo' self._test_method('set_property_atomic', None, image, 'foo', 'bar') def test_set_property_nonimage(self): self.assertRaises( AssertionError, self._test_method, 'set_property_atomic', None, 'notimage', 'foo', 'bar') def test_delete_property_atomic(self): image = mock.MagicMock() image.image_id = 'foo' self._test_method('delete_property_atomic', None, image, 'foo', 'bar') def test_delete_property_nonimage(self): self.assertRaises( AssertionError,
self._test_method, 'delete_property_atomic', None, 'notimage', 'foo', 'bar') class TestProxyRepoWrapping(test_utils.BaseTestCase): def setUp(self): super(TestProxyRepoWrapping, self).setUp() self.fake_repo = FakeRepo() self.proxy_repo = proxy.Repo(self.fake_repo, item_proxy_class=FakeProxy, item_proxy_kwargs={'a': 1}) def _test_method(self, name, base_result, *args, **kwargs): self.fake_repo.result = base_result method = getattr(self.proxy_repo, name) proxy_result = method(*args, **kwargs) self.assertIsInstance(proxy_result, FakeProxy) self.assertEqual(base_result, proxy_result.base) self.assertEqual(0, len(proxy_result.args)) self.assertEqual({'a': 1}, proxy_result.kwargs) self.assertEqual(args, self.fake_repo.args) self.assertEqual(kwargs, self.fake_repo.kwargs) def test_get(self): self.fake_repo.result = 'snarf' result = self.proxy_repo.get('some-id') self.assertIsInstance(result, FakeProxy) self.assertEqual(('some-id',), self.fake_repo.args) self.assertEqual({}, self.fake_repo.kwargs) self.assertEqual('snarf', result.base) self.assertEqual(tuple(), result.args) self.assertEqual({'a': 1}, result.kwargs) def test_list(self): self.fake_repo.result = ['scratch', 'sniff'] results = self.proxy_repo.list(2, prefix='s') self.assertEqual((2,), self.fake_repo.args) self.assertEqual({'prefix': 's'}, self.fake_repo.kwargs) self.assertEqual(2, len(results)) for i in range(2): self.assertIsInstance(results[i], FakeProxy) self.assertEqual(self.fake_repo.result[i], results[i].base) self.assertEqual(tuple(), results[i].args) self.assertEqual({'a': 1}, results[i].kwargs) def _test_method_with_proxied_argument(self, name, result, **kwargs): self.fake_repo.result = result item = FakeProxy('snoop') method = getattr(self.proxy_repo, name) proxy_result = method(item) self.assertEqual(('snoop',), self.fake_repo.args) self.assertEqual(kwargs, self.fake_repo.kwargs) if result is None: self.assertIsNone(proxy_result) else: self.assertIsInstance(proxy_result, FakeProxy) self.assertEqual(result, proxy_result.base) self.assertEqual(tuple(), proxy_result.args) self.assertEqual({'a': 1}, proxy_result.kwargs) def test_add(self): self._test_method_with_proxied_argument('add', 'dog') def test_add_with_no_result(self): self._test_method_with_proxied_argument('add', None) def test_save(self): self._test_method_with_proxied_argument('save', 'dog', from_state=None) def test_save_with_no_result(self): self._test_method_with_proxied_argument('save', None, from_state=None) def test_remove(self): self._test_method_with_proxied_argument('remove', 'dog') def test_remove_with_no_result(self): self._test_method_with_proxied_argument('remove', None) class FakeImageFactory(object): def __init__(self, result=None): self.result = None self.kwargs = None def new_image(self, **kwargs): self.kwargs = kwargs return self.result class TestImageFactory(test_utils.BaseTestCase): def setUp(self): super(TestImageFactory, self).setUp() self.factory = FakeImageFactory() def test_proxy_plain(self): proxy_factory = proxy.ImageFactory(self.factory) self.factory.result = 'eddard' image = proxy_factory.new_image(a=1, b='two') self.assertEqual('eddard', image) self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs) def test_proxy_wrapping(self): proxy_factory = proxy.ImageFactory(self.factory, proxy_class=FakeProxy, proxy_kwargs={'dog': 'bark'}) self.factory.result = 'stark' image = proxy_factory.new_image(a=1, b='two') self.assertIsInstance(image, FakeProxy) self.assertEqual('stark', image.base) self.assertEqual({'a': 1, 'b': 'two'}, 
self.factory.kwargs) class FakeImageMembershipFactory(object): def __init__(self, result=None): self.result = None self.image = None self.member_id = None def new_image_member(self, image, member_id): self.image = image self.member_id = member_id return self.result class TestImageMembershipFactory(test_utils.BaseTestCase): def setUp(self): super(TestImageMembershipFactory, self).setUp() self.factory = FakeImageMembershipFactory() def test_proxy_plain(self): proxy_factory = proxy.ImageMembershipFactory(self.factory) self.factory.result = 'tyrion' membership = proxy_factory.new_image_member('jaime', 'cersei') self.assertEqual('tyrion', membership) self.assertEqual('jaime', self.factory.image) self.assertEqual('cersei', self.factory.member_id) def test_proxy_wrapped_membership(self): proxy_factory = proxy.ImageMembershipFactory( self.factory, proxy_class=FakeProxy, proxy_kwargs={'a': 1}) self.factory.result = 'tyrion' membership = proxy_factory.new_image_member('jaime', 'cersei') self.assertIsInstance(membership, FakeProxy) self.assertEqual('tyrion', membership.base) self.assertEqual({'a': 1}, membership.kwargs) self.assertEqual('jaime', self.factory.image) self.assertEqual('cersei', self.factory.member_id) def test_proxy_wrapped_image(self): proxy_factory = proxy.ImageMembershipFactory( self.factory, proxy_class=FakeProxy) self.factory.result = 'tyrion' image = FakeProxy('jaime') membership = proxy_factory.new_image_member(image, 'cersei') self.assertIsInstance(membership, FakeProxy) self.assertIsInstance(self.factory.image, FakeProxy) self.assertEqual('cersei', self.factory.member_id) def test_proxy_both_wrapped(self): class FakeProxy2(FakeProxy): pass proxy_factory = proxy.ImageMembershipFactory( self.factory, proxy_class=FakeProxy, proxy_kwargs={'b': 2}) self.factory.result = 'tyrion' image = FakeProxy2('jaime') membership = proxy_factory.new_image_member(image, 'cersei') self.assertIsInstance(membership, FakeProxy) self.assertEqual('tyrion', membership.base) self.assertEqual({'b': 2}, membership.kwargs) self.assertIsInstance(self.factory.image, FakeProxy2) self.assertEqual('cersei', self.factory.member_id) class FakeImage(object): def __init__(self, result=None): self.result = result class TestTaskFactory(test_utils.BaseTestCase): def setUp(self): super(TestTaskFactory, self).setUp() self.factory = mock.Mock() self.fake_type = 'import' self.fake_owner = "owner" def test_proxy_plain(self): proxy_factory = proxy.TaskFactory(self.factory) proxy_factory.new_task( type=self.fake_type, owner=self.fake_owner ) self.factory.new_task.assert_called_once_with( type=self.fake_type, owner=self.fake_owner ) def test_proxy_wrapping(self): proxy_factory = proxy.TaskFactory( self.factory, task_proxy_class=FakeProxy, task_proxy_kwargs={'dog': 'bark'}) self.factory.new_task.return_value = 'fake_task' task = proxy_factory.new_task( type=self.fake_type, owner=self.fake_owner ) self.factory.new_task.assert_called_once_with( type=self.fake_type, owner=self.fake_owner ) self.assertIsInstance(task, FakeProxy) self.assertEqual('fake_task', task.base) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_gateway.py0000664000175000017500000001655500000000000021516 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from glance.api import property_protections from glance import context from glance import gateway from glance import notifier from glance import quota from glance.tests.unit import utils as unit_test_utils import glance.tests.utils as test_utils class TestGateway(test_utils.BaseTestCase): def setUp(self): super(TestGateway, self).setUp() self.gateway = gateway.Gateway() self.context = mock.sentinel.context @mock.patch('glance.domain.TaskExecutorFactory') def test_get_task_executor_factory(self, mock_factory): @mock.patch.object(self.gateway, 'get_task_repo') @mock.patch.object(self.gateway, 'get_repo') @mock.patch.object(self.gateway, 'get_image_factory') def _test(mock_gif, mock_gr, mock_gtr): self.gateway.get_task_executor_factory(self.context) mock_gtr.assert_called_once_with(self.context) mock_gr.assert_called_once_with(self.context) mock_gif.assert_called_once_with(self.context) mock_factory.assert_called_once_with( mock_gtr.return_value, mock_gr.return_value, mock_gif.return_value, admin_repo=None) _test() @mock.patch('glance.domain.TaskExecutorFactory') def test_get_task_executor_factory_with_admin(self, mock_factory): @mock.patch.object(self.gateway, 'get_task_repo') @mock.patch.object(self.gateway, 'get_repo') @mock.patch.object(self.gateway, 'get_image_factory') def _test(mock_gif, mock_gr, mock_gtr): mock_gr.side_effect = [mock.sentinel.image_repo, mock.sentinel.admin_repo] self.gateway.get_task_executor_factory( self.context, admin_context=mock.sentinel.admin_context) mock_gtr.assert_called_once_with(self.context) mock_gr.assert_has_calls([ mock.call(self.context), mock.call(mock.sentinel.admin_context) ]) mock_gif.assert_called_once_with(self.context) mock_factory.assert_called_once_with( mock_gtr.return_value, mock.sentinel.image_repo, mock_gif.return_value, admin_repo=mock.sentinel.admin_repo) _test() def test_get_repo(self): repo = self.gateway.get_repo(self.context) self.assertIsInstance(repo, notifier.ImageRepoProxy) @mock.patch('glance.common.property_utils.PropertyRules._load_rules') def test_get_repo_with_pp(self, mock_load): self.config(property_protection_file='foo') repo = self.gateway.get_repo(self.context) self.assertIsInstance(repo, property_protections.ProtectedImageRepoProxy) def test_get_image_factory(self): factory = self.gateway.get_image_factory(self.context) self.assertIsInstance(factory, notifier.ImageFactoryProxy) @mock.patch('glance.common.property_utils.PropertyRules._load_rules') def test_get_image_factory_with_pp(self, mock_load): self.config(property_protection_file='foo') factory = self.gateway.get_image_factory(self.context) self.assertIsInstance(factory, property_protections.ProtectedImageFactoryProxy) def test_get_repo_member_property(self): """Test that the image.member property is propagated all the way from the DB to the top of the gateway repo stack. 
""" db_api = unit_test_utils.FakeDB() gw = gateway.Gateway(db_api=db_api) # Get the UUID1 image as TENANT1 ctxt = context.RequestContext(tenant=unit_test_utils.TENANT1) repo = gw.get_repo(ctxt) image = repo.get(unit_test_utils.UUID1) # We own the image, so member is None self.assertIsNone(image.member) # Get the UUID1 image as TENANT2 ctxt = context.RequestContext(tenant=unit_test_utils.TENANT2) repo = gw.get_repo(ctxt) image = repo.get(unit_test_utils.UUID1) # We are a member, so member is our tenant id self.assertEqual(unit_test_utils.TENANT2, image.member) def test_get_namespace_repo(self): repo = self.gateway.get_metadef_namespace_repo(self.context) self.assertIsInstance(repo, notifier.MetadefNamespaceRepoProxy) def test_get_namespace_factory(self): repo = self.gateway.get_metadef_namespace_factory(self.context) self.assertIsInstance(repo, notifier.MetadefNamespaceFactoryProxy) def test_get_object_repo(self): repo = self.gateway.get_metadef_object_repo(self.context) self.assertIsInstance(repo, notifier.MetadefObjectRepoProxy) def test_get_object_factory(self): repo = self.gateway.get_metadef_object_factory(self.context) self.assertIsInstance(repo, notifier.MetadefObjectFactoryProxy) def test_get_resourcetype_repo(self): repo = self.gateway.get_metadef_resource_type_repo(self.context) self.assertIsInstance(repo, notifier.MetadefResourceTypeRepoProxy) def test_get_resource_type_factory(self): repo = self.gateway.get_metadef_resource_type_factory(self.context) self.assertIsInstance(repo, notifier.MetadefResourceTypeFactoryProxy) def test_get_property_repo(self): repo = self.gateway.get_metadef_property_repo(self.context) self.assertIsInstance(repo, notifier.MetadefPropertyRepoProxy) def test_get_property_factory(self): repo = self.gateway.get_metadef_property_factory(self.context) self.assertIsInstance(repo, notifier.MetadefPropertyFactoryProxy) def test_get_tag_repo(self): repo = self.gateway.get_metadef_tag_repo(self.context) self.assertIsInstance(repo, notifier.MetadefTagRepoProxy) def test_get_tag_factory(self): repo = self.gateway.get_metadef_tag_factory(self.context) self.assertIsInstance(repo, notifier.MetadefTagFactoryProxy) def test_get_member_repo(self): repo = self.gateway.get_member_repo(mock.sentinel.image, self.context) self.assertIsInstance(repo, notifier.ImageMemberRepoProxy) def test_get_member_factory(self): repo = self.gateway.get_image_member_factory(self.context) self.assertIsInstance(repo, quota.ImageMemberFactoryProxy) def test_get_task_repo(self): repo = self.gateway.get_task_repo(self.context) self.assertIsInstance(repo, notifier.TaskRepoProxy) def test_get_task_factory(self): repo = self.gateway.get_task_factory(self.context) self.assertIsInstance(repo, notifier.TaskFactoryProxy) def test_get_task_stub_repo(self): repo = self.gateway.get_task_stub_repo(self.context) self.assertIsInstance(repo, notifier.TaskStubRepoProxy) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_glance_manage.py0000664000175000017500000001071400000000000022605 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. # Copyright 2016 NTT Data. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_db import exception as db_exception from glance.cmd import manage from glance import context from glance.db.sqlalchemy import api as db_api import glance.tests.utils as test_utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' class DBCommandsTestCase(test_utils.BaseTestCase): def setUp(self): super(DBCommandsTestCase, self).setUp() self.commands = manage.DbCommands() self.context = context.RequestContext( user=USER1, tenant=TENANT1) @mock.patch.object(db_api, 'purge_deleted_rows') @mock.patch.object(context, 'get_admin_context') def test_purge_command(self, mock_context, mock_db_purge): mock_context.return_value = self.context self.commands.purge(0, 100) mock_db_purge.assert_called_once_with(self.context, 0, 100) def test_purge_command_rows_less_minus_one(self): exit = self.assertRaises(SystemExit, self.commands.purge, 1, -2) self.assertEqual("Minimal rows limit is -1.", exit.code) def test_purge_invalid_age_in_days(self): age_in_days = 'abcd' ex = self.assertRaises(SystemExit, self.commands.purge, age_in_days) expected = ("Invalid int value for age_in_days: " "%(age_in_days)s") % {'age_in_days': age_in_days} self.assertEqual(expected, ex.code) def test_purge_negative_age_in_days(self): ex = self.assertRaises(SystemExit, self.commands.purge, '-1') self.assertEqual("Must supply a non-negative value for age.", ex.code) def test_purge_invalid_max_rows(self): max_rows = 'abcd' ex = self.assertRaises(SystemExit, self.commands.purge, 1, max_rows) expected = ("Invalid int value for max_rows: " "%(max_rows)s") % {'max_rows': max_rows} self.assertEqual(expected, ex.code) @mock.patch.object(db_api, 'purge_deleted_rows') @mock.patch.object(context, 'get_admin_context') def test_purge_max_rows(self, mock_context, mock_db_purge): mock_context.return_value = self.context value = (2 ** 31) - 1 self.commands.purge(age_in_days=1, max_rows=value) mock_db_purge.assert_called_once_with(self.context, 1, value) def test_purge_command_exceeded_maximum_rows(self): # value(2 ** 31) is greater than max_rows(2147483647) by 1. value = 2 ** 31 ex = self.assertRaises(SystemExit, self.commands.purge, age_in_days=1, max_rows=value) expected = "'max_rows' value out of range, must not exceed 2147483647." 
self.assertEqual(expected, ex.code) @mock.patch('glance.db.sqlalchemy.api.purge_deleted_rows') def test_purge_command_fk_constraint_failure(self, purge_deleted_rows): purge_deleted_rows.side_effect = db_exception.DBReferenceError( 'fake_table', 'fake_constraint', 'fake_key', 'fake_key_table') exit = self.assertRaises(SystemExit, self.commands.purge, 10, 100) self.assertEqual("Purge command failed, check glance-manage logs" " for more details.", exit.code) @mock.patch.object(db_api, 'purge_deleted_rows') @mock.patch.object(context, 'get_admin_context') def test_purge_command_purge_all(self, mock_context, mock_db_purge): mock_context.return_value = self.context self.commands.purge(max_rows=-1) mock_db_purge.assert_called_once_with(self.context, 30, -1) @mock.patch.object(db_api, 'purge_deleted_rows_from_images') @mock.patch.object(context, 'get_admin_context') def test_purge_images_table_purge_all(self, mock_context, mock_db_purge): mock_context.return_value = self.context self.commands.purge_images_table(max_rows=-1) mock_db_purge.assert_called_once_with(self.context, 180, -1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_glance_replicator.py0000664000175000017500000005602200000000000023523 0ustar00zuulzuul00000000000000# Copyright 2012 Michael Still and Canonical Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
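# NOTE(editor): illustrative sketch, not part of the original module. The
# tests below drive glance_replicator.ImageService against the
# FakeHTTPConnection defined further down, which must be "primed" with every
# request it is expected to see; an unprimed request raises immediately.
# Assuming only prime_request()/request()/getresponse() as defined in this
# module, the pattern is roughly:
#
#     conn = FakeHTTPConnection()
#     conn.prime_request('GET', 'v1/images/detail?is_public=None', '',
#                        {'x-auth-token': 'noauth'},
#                        http.OK, jsonutils.dumps({'images': []}), {})
#     conn.request('GET', 'v1/images/detail?is_public=None', '',
#                  {'x-auth-token': 'noauth'})
#     assert conn.getresponse().status == http.OK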
import collections import http.client as http import io from unittest import mock import copy import os import sys import uuid import fixtures from oslo_serialization import jsonutils import webob from glance.cmd import replicator as glance_replicator from glance.common import exception from glance.tests.unit import utils as unit_test_utils from glance.tests import utils as test_utils IMG_RESPONSE_ACTIVE = { 'content-length': '0', 'property-image_state': 'available', 'min_ram': '0', 'disk_format': 'aki', 'updated_at': '2012-06-25T02:10:36', 'date': 'Thu, 28 Jun 2012 07:20:05 GMT', 'owner': '8aef75b5c0074a59aa99188fdb4b9e90', 'id': '6d55dd55-053a-4765-b7bc-b30df0ea3861', 'size': '4660272', 'property-image_location': 'ubuntu-bucket/oneiric-server-cloudimg-amd64-' 'vmlinuz-generic.manifest.xml', 'property-architecture': 'x86_64', 'etag': 'f46cfe7fb3acaff49a3567031b9b53bb', 'location': 'http://127.0.0.1:9292/v1/images/' '6d55dd55-053a-4765-b7bc-b30df0ea3861', 'container_format': 'aki', 'status': 'active', 'deleted': 'False', 'min_disk': '0', 'is_public': 'False', 'name': 'ubuntu-bucket/oneiric-server-cloudimg-amd64-vmlinuz-generic', 'checksum': 'f46cfe7fb3acaff49a3567031b9b53bb', 'created_at': '2012-06-25T02:10:32', 'protected': 'False', 'content-type': 'text/html; charset=UTF-8' } IMG_RESPONSE_QUEUED = copy.copy(IMG_RESPONSE_ACTIVE) IMG_RESPONSE_QUEUED['status'] = 'queued' IMG_RESPONSE_QUEUED['id'] = '49b2c782-ee10-4692-84f8-3942e9432c4b' IMG_RESPONSE_QUEUED['location'] = ('http://127.0.0.1:9292/v1/images/' + IMG_RESPONSE_QUEUED['id']) class FakeHTTPConnection(object): def __init__(self): self.count = 0 self.reqs = {} self.last_req = None self.host = 'localhost' self.port = 9292 def prime_request(self, method, url, in_body, in_headers, out_code, out_body, out_headers): if not url.startswith('/'): url = '/' + url url = unit_test_utils.sort_url_by_qs_keys(url) hkeys = sorted(in_headers.keys()) hashable = (method, url, in_body, ' '.join(hkeys)) flat_headers = [] for key in out_headers: flat_headers.append((key, out_headers[key])) self.reqs[hashable] = (out_code, out_body, flat_headers) def request(self, method, url, body, headers): self.count += 1 url = unit_test_utils.sort_url_by_qs_keys(url) hkeys = sorted(headers.keys()) hashable = (method, url, body, ' '.join(hkeys)) if hashable not in self.reqs: options = [] for h in self.reqs: options.append(repr(h)) raise Exception('No such primed request: %s "%s"\n' '%s\n\n' 'Available:\n' '%s' % (method, url, hashable, '\n\n'.join(options))) self.last_req = hashable def getresponse(self): class FakeResponse(object): def __init__(self, args): (code, body, headers) = args self.body = io.StringIO(body) self.headers = headers self.status = code def read(self, count=1000000): return self.body.read(count) def getheaders(self): return self.headers return FakeResponse(self.reqs[self.last_req]) class ImageServiceTestCase(test_utils.BaseTestCase): def test_rest_errors(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') for code, exc in [(http.BAD_REQUEST, webob.exc.HTTPBadRequest), (http.UNAUTHORIZED, webob.exc.HTTPUnauthorized), (http.FORBIDDEN, webob.exc.HTTPForbidden), (http.CONFLICT, webob.exc.HTTPConflict), (http.INTERNAL_SERVER_ERROR, webob.exc.HTTPInternalServerError)]: c.conn.prime_request('GET', ('v1/images/' '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'), '', {'x-auth-token': 'noauth'}, code, '', {}) self.assertRaises(exc, c.get_image, '5dcddce0-cba5-4f18-9cf4-9853c7b207a6') def test_rest_get_images(self): c = 
glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') # Two images, one of which is queued resp = {'images': [IMG_RESPONSE_ACTIVE, IMG_RESPONSE_QUEUED]} c.conn.prime_request('GET', 'v1/images/detail?is_public=None', '', {'x-auth-token': 'noauth'}, http.OK, jsonutils.dumps(resp), {}) c.conn.prime_request('GET', ('v1/images/detail?marker=%s&is_public=None' % IMG_RESPONSE_QUEUED['id']), '', {'x-auth-token': 'noauth'}, http.OK, jsonutils.dumps({'images': []}), {}) imgs = list(c.get_images()) self.assertEqual(2, len(imgs)) self.assertEqual(2, c.conn.count) def test_rest_get_image(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') image_contents = 'THISISTHEIMAGEBODY' c.conn.prime_request('GET', 'v1/images/%s' % IMG_RESPONSE_ACTIVE['id'], '', {'x-auth-token': 'noauth'}, http.OK, image_contents, IMG_RESPONSE_ACTIVE) body = c.get_image(IMG_RESPONSE_ACTIVE['id']) self.assertEqual(image_contents, body.read()) def test_rest_header_list_to_dict(self): i = [('x-image-meta-banana', 42), ('gerkin', 12), ('x-image-meta-property-frog', 11), ('x-image-meta-property-duck', 12)] o = glance_replicator.ImageService._header_list_to_dict(i) self.assertIn('banana', o) self.assertIn('gerkin', o) self.assertIn('properties', o) self.assertIn('frog', o['properties']) self.assertIn('duck', o['properties']) self.assertNotIn('x-image-meta-banana', o) def test_rest_get_image_meta(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') c.conn.prime_request('HEAD', 'v1/images/%s' % IMG_RESPONSE_ACTIVE['id'], '', {'x-auth-token': 'noauth'}, http.OK, '', IMG_RESPONSE_ACTIVE) header = c.get_image_meta(IMG_RESPONSE_ACTIVE['id']) self.assertIn('id', header) def test_rest_dict_to_headers(self): i = {'banana': 42, 'gerkin': 12, 'properties': {'frog': 1, 'kernel_id': None} } o = glance_replicator.ImageService._dict_to_headers(i) self.assertIn('x-image-meta-banana', o) self.assertIn('x-image-meta-gerkin', o) self.assertIn('x-image-meta-property-frog', o) self.assertIn('x-image-meta-property-kernel_id', o) self.assertEqual(o['x-image-meta-property-kernel_id'], '') self.assertNotIn('properties', o) def test_rest_add_image(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') image_body = 'THISISANIMAGEBODYFORSURE!' 
image_meta_with_proto = { 'x-auth-token': 'noauth', 'Content-Type': 'application/octet-stream', 'Content-Length': len(image_body) } for key in IMG_RESPONSE_ACTIVE: image_meta_with_proto[ 'x-image-meta-%s' % key] = IMG_RESPONSE_ACTIVE[key] c.conn.prime_request('POST', 'v1/images', image_body, image_meta_with_proto, http.OK, '', IMG_RESPONSE_ACTIVE) headers, body = c.add_image(IMG_RESPONSE_ACTIVE, image_body) self.assertEqual(IMG_RESPONSE_ACTIVE, headers) self.assertEqual(1, c.conn.count) def test_rest_add_image_meta(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') image_meta = {'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'} image_meta_headers = glance_replicator.ImageService._dict_to_headers( image_meta) image_meta_headers['x-auth-token'] = 'noauth' image_meta_headers['Content-Type'] = 'application/octet-stream' c.conn.prime_request('PUT', 'v1/images/%s' % image_meta['id'], '', image_meta_headers, http.OK, '', '') headers, body = c.add_image_meta(image_meta) class FakeHttpResponse(object): def __init__(self, headers, data): self.headers = headers self.data = io.BytesIO(data) def getheaders(self): return self.headers def read(self, amt=None): return self.data.read(amt) FAKEIMAGES = [{'status': 'active', 'size': 100, 'dontrepl': 'banana', 'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6', 'name': 'x1'}, {'status': 'deleted', 'size': 200, 'dontrepl': 'banana', 'id': 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b', 'name': 'x2'}, {'status': 'active', 'size': 300, 'dontrepl': 'banana', 'id': '37ff82db-afca-48c7-ae0b-ddc7cf83e3db', 'name': 'x3'}] FAKEIMAGES_LIVEMASTER = [{'status': 'active', 'size': 100, 'dontrepl': 'banana', 'name': 'x1', 'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'}, {'status': 'deleted', 'size': 200, 'dontrepl': 'banana', 'name': 'x2', 'id': 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b'}, {'status': 'deleted', 'size': 300, 'dontrepl': 'banana', 'name': 'x3', 'id': '37ff82db-afca-48c7-ae0b-ddc7cf83e3db'}, {'status': 'active', 'size': 100, 'dontrepl': 'banana', 'name': 'x4', 'id': '15648dd7-8dd0-401c-bd51-550e1ba9a088'}] class FakeImageService(object): def __init__(self, http_conn, authtoken): self.authtoken = authtoken def get_images(self): if self.authtoken == 'livesourcetoken': return FAKEIMAGES_LIVEMASTER return FAKEIMAGES def get_image(self, id): return FakeHttpResponse({}, b'data') def get_image_meta(self, id): for img in FAKEIMAGES: if img['id'] == id: return img return {} def add_image_meta(self, meta): return {'status': http.OK}, None def add_image(self, meta, data): return {'status': http.OK}, None def get_image_service(): return FakeImageService def check_no_args(command, args): options = collections.UserDict() no_args_error = False orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service command(options, args) except TypeError as e: if str(e) == "Too few arguments.": no_args_error = True finally: glance_replicator.get_image_service = orig_img_service return no_args_error def check_bad_args(command, args): options = collections.UserDict() bad_args_error = False orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service command(options, args) except ValueError: bad_args_error = True finally: glance_replicator.get_image_service = orig_img_service return bad_args_error class ReplicationCommandsTestCase(test_utils.BaseTestCase): @mock.patch.object(glance_replicator, 'lookup_command') def test_help(self, mock_lookup_command): option = mock.Mock() 
mock_lookup_command.return_value = "fake_return" glance_replicator.print_help(option, []) glance_replicator.print_help(option, ['dump']) glance_replicator.print_help(option, ['fake_command']) self.assertEqual(2, mock_lookup_command.call_count) def test_replication_size(self): options = collections.UserDict() options.targettoken = 'targettoken' args = ['localhost:9292'] stdout = sys.stdout orig_img_service = glance_replicator.get_image_service sys.stdout = io.StringIO() try: glance_replicator.get_image_service = get_image_service glance_replicator.replication_size(options, args) sys.stdout.seek(0) output = sys.stdout.read() finally: sys.stdout = stdout glance_replicator.get_image_service = orig_img_service output = output.rstrip() self.assertEqual( 'Total size is 400 bytes (400.0 B) across 2 images', output ) def test_replication_size_with_no_args(self): args = [] command = glance_replicator.replication_size self.assertTrue(check_no_args(command, args)) def test_replication_size_with_args_is_None(self): args = None command = glance_replicator.replication_size self.assertTrue(check_no_args(command, args)) def test_replication_size_with_bad_args(self): args = ['aaa'] command = glance_replicator.replication_size self.assertTrue(check_bad_args(command, args)) def test_human_readable_size(self): _human_readable_size = glance_replicator._human_readable_size self.assertEqual('0.0 B', _human_readable_size(0)) self.assertEqual('1.0 B', _human_readable_size(1)) self.assertEqual('512.0 B', _human_readable_size(512)) self.assertEqual('1.0 KiB', _human_readable_size(1024)) self.assertEqual('2.0 KiB', _human_readable_size(2048)) self.assertEqual('8.0 KiB', _human_readable_size(8192)) self.assertEqual('64.0 KiB', _human_readable_size(65536)) self.assertEqual('93.3 KiB', _human_readable_size(95536)) self.assertEqual('117.7 MiB', _human_readable_size(123456789)) self.assertEqual('36.3 GiB', _human_readable_size(39022543360)) def test_replication_dump(self): tempdir = self.useFixture(fixtures.TempDir()).path options = collections.UserDict() options.chunksize = 4096 options.sourcetoken = 'sourcetoken' options.metaonly = False args = ['localhost:9292', tempdir] orig_img_service = glance_replicator.get_image_service self.addCleanup(setattr, glance_replicator, 'get_image_service', orig_img_service) glance_replicator.get_image_service = get_image_service glance_replicator.replication_dump(options, args) for active in ['5dcddce0-cba5-4f18-9cf4-9853c7b207a6', '37ff82db-afca-48c7-ae0b-ddc7cf83e3db']: imgfile = os.path.join(tempdir, active) self.assertTrue(os.path.exists(imgfile)) self.assertTrue(os.path.exists('%s.img' % imgfile)) with open(imgfile) as f: d = jsonutils.loads(f.read()) self.assertIn('status', d) self.assertIn('id', d) self.assertIn('size', d) for inactive in ['f4da1d2a-40e8-4710-b3aa-0222a4cc887b']: imgfile = os.path.join(tempdir, inactive) self.assertTrue(os.path.exists(imgfile)) self.assertFalse(os.path.exists('%s.img' % imgfile)) with open(imgfile) as f: d = jsonutils.loads(f.read()) self.assertIn('status', d) self.assertIn('id', d) self.assertIn('size', d) def test_replication_dump_with_no_args(self): args = [] command = glance_replicator.replication_dump self.assertTrue(check_no_args(command, args)) def test_replication_dump_with_bad_args(self): args = ['aaa', 'bbb'] command = glance_replicator.replication_dump self.assertTrue(check_bad_args(command, args)) def test_replication_load(self): tempdir = self.useFixture(fixtures.TempDir()).path def write_image(img, data): imgfile = 
os.path.join(tempdir, img['id']) with open(imgfile, 'w') as f: f.write(jsonutils.dumps(img)) if data: with open('%s.img' % imgfile, 'w') as f: f.write(data) for img in FAKEIMAGES: cimg = copy.copy(img) # We need at least one image where the stashed metadata on disk # is newer than what the fake has if cimg['id'] == '5dcddce0-cba5-4f18-9cf4-9853c7b207a6': cimg['extra'] = 'thisissomeextra' # This is an image where the metadata change should be ignored if cimg['id'] == 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b': cimg['dontrepl'] = 'thisisyetmoreextra' write_image(cimg, 'kjdhfkjshdfkjhsdkfd') # And an image which isn't on the destination at all new_id = str(uuid.uuid4()) cimg['id'] = new_id write_image(cimg, 'dskjfhskjhfkfdhksjdhf') # And an image which isn't on the destination, but lacks image # data new_id_missing_data = str(uuid.uuid4()) cimg['id'] = new_id_missing_data write_image(cimg, None) # A file which should be ignored badfile = os.path.join(tempdir, 'kjdfhf') with open(badfile, 'w') as f: f.write(jsonutils.dumps([1, 2, 3, 4, 5])) # Finally, we're ready to test options = collections.UserDict() options.dontreplicate = 'dontrepl dontreplabsent' options.targettoken = 'targettoken' args = ['localhost:9292', tempdir] orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service updated = glance_replicator.replication_load(options, args) finally: glance_replicator.get_image_service = orig_img_service self.assertIn('5dcddce0-cba5-4f18-9cf4-9853c7b207a6', updated) self.assertNotIn('f4da1d2a-40e8-4710-b3aa-0222a4cc887b', updated) self.assertIn(new_id, updated) self.assertNotIn(new_id_missing_data, updated) def test_replication_load_with_no_args(self): args = [] command = glance_replicator.replication_load self.assertTrue(check_no_args(command, args)) def test_replication_load_with_bad_args(self): args = ['aaa', 'bbb'] command = glance_replicator.replication_load self.assertTrue(check_bad_args(command, args)) def test_replication_livecopy(self): options = collections.UserDict() options.chunksize = 4096 options.dontreplicate = 'dontrepl dontreplabsent' options.sourcetoken = 'livesourcetoken' options.targettoken = 'livetargettoken' options.metaonly = False args = ['localhost:9292', 'localhost:9393'] orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service updated = glance_replicator.replication_livecopy(options, args) finally: glance_replicator.get_image_service = orig_img_service self.assertEqual(2, len(updated)) def test_replication_livecopy_with_no_args(self): args = [] command = glance_replicator.replication_livecopy self.assertTrue(check_no_args(command, args)) def test_replication_livecopy_with_bad_args(self): args = ['aaa', 'bbb'] command = glance_replicator.replication_livecopy self.assertTrue(check_bad_args(command, args)) def test_replication_compare(self): options = collections.UserDict() options.chunksize = 4096 options.dontreplicate = 'dontrepl dontreplabsent' options.sourcetoken = 'livesourcetoken' options.targettoken = 'livetargettoken' options.metaonly = False args = ['localhost:9292', 'localhost:9393'] orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service differences = glance_replicator.replication_compare(options, args) finally: glance_replicator.get_image_service = orig_img_service self.assertIn('15648dd7-8dd0-401c-bd51-550e1ba9a088', differences) 
self.assertEqual(differences['15648dd7-8dd0-401c-bd51-550e1ba9a088'], 'missing') self.assertIn('37ff82db-afca-48c7-ae0b-ddc7cf83e3db', differences) self.assertEqual(differences['37ff82db-afca-48c7-ae0b-ddc7cf83e3db'], 'diff') def test_replication_compare_with_no_args(self): args = [] command = glance_replicator.replication_compare self.assertTrue(check_no_args(command, args)) def test_replication_compare_with_bad_args(self): args = ['aaa', 'bbb'] command = glance_replicator.replication_compare self.assertTrue(check_bad_args(command, args)) class ReplicationUtilitiesTestCase(test_utils.BaseTestCase): def test_check_upload_response_headers(self): glance_replicator._check_upload_response_headers({'status': 'active'}, None) d = {'image': {'status': 'active'}} glance_replicator._check_upload_response_headers({}, jsonutils.dumps(d)) self.assertRaises( exception.UploadException, glance_replicator._check_upload_response_headers, {}, None) def test_image_present(self): client = FakeImageService(None, 'noauth') self.assertTrue(glance_replicator._image_present( client, '5dcddce0-cba5-4f18-9cf4-9853c7b207a6')) self.assertFalse(glance_replicator._image_present( client, uuid.uuid4())) def test_dict_diff(self): a = {'a': 1, 'b': 2, 'c': 3} b = {'a': 1, 'b': 2} c = {'a': 1, 'b': 1, 'c': 3} d = {'a': 1, 'b': 2, 'c': 3, 'd': 4} # Only things that the first dict has which the second dict doesn't # matter here. self.assertFalse(glance_replicator._dict_diff(a, a)) self.assertTrue(glance_replicator._dict_diff(a, b)) self.assertTrue(glance_replicator._dict_diff(a, c)) self.assertFalse(glance_replicator._dict_diff(a, d)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_housekeeping.py0000664000175000017500000002451000000000000022531 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
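# NOTE(editor): illustrative sketch, not part of the original module. The
# tests below exercise housekeeping.StagingStoreCleaner, which scans the
# staging directory for residue left behind by interrupted imports. File
# names are expected to be an image UUID with an optional suffix (e.g.
# '<uuid>.qcow2' for a mid-conversion target); anything else is ignored.
# Assuming only the methods exercised below, the cleanup loop is roughly:
#
#     cleaner = housekeeping.StagingStoreCleaner(db)
#     image_id = cleaner.get_image_id(filename)   # UUID or None
#     if image_id and not cleaner.is_valid_image(image_id):
#         cleaner.delete_file(path)   # stale residue; os.remove() under
#                                     # the hood, FileNotFoundError ignored
#
# which is what clean_orphaned_staging_residue() performs for every entry
# in the staging directory.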
import os from unittest import mock import glance_store from oslo_config import cfg from oslo_utils.fixture import uuidsentinel as uuids from glance.common import exception from glance import context from glance import housekeeping import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF class TestStagingStoreHousekeeping(test_utils.BaseTestCase): def _store_dir(self, store): return os.path.join(self.test_dir, store) def setUp(self): super(TestStagingStoreHousekeeping, self).setUp() self.config(enabled_backends={'store1': 'file'}) glance_store.register_store_opts( CONF, reserved_stores={'os_glance_staging_store': 'file'}) self.config(default_backend='store1', group='glance_store') self.config(filesystem_store_datadir=self._store_dir('store1'), group='store1') self.config(filesystem_store_datadir=self._store_dir('staging'), group='os_glance_staging_store') glance_store.create_multi_stores( CONF, reserved_stores={'os_glance_staging_store': 'file'}) self.db = unit_test_utils.FakeDB(initialize=False) self.cleaner = housekeeping.StagingStoreCleaner(self.db) self.context = context.get_admin_context() def test_get_staging_path(self): expected = os.path.join(self.test_dir, 'staging') self.assertEqual(expected, housekeeping.staging_store_path()) def test_get_staging_path_single_store(self): self.config(enabled_backends={}) expected = '/tmp/staging/' self.assertEqual(expected, housekeeping.staging_store_path()) @mock.patch('glance.common.store_utils.get_dir_separator') def test_assert_staging_scheme(self, mock_get_dir_separator): # NOTE(danms): This cannot happen now, but since we need to be # opinionated about the fact that the URL is a file path, better # to check for it, in case it changes in the future. mock_get_dir_separator.return_value = ('/', 'http://foo') self.assertRaises(exception.GlanceException, lambda: housekeeping.staging_store_path()) def test_assert_staging_scheme_on_init(self): # NOTE(danms): Make this a single-store scenario, which will cover # our assertion about node_staging_uri while we test for the # assert-on-init behavior. 
self.config(enabled_backends={}, node_staging_uri='http://good.luck') self.assertRaises(exception.GlanceException, housekeeping.staging_store_path) def test_get_image_id(self): self.assertEqual(uuids.some_random_uuid, self.cleaner.get_image_id(uuids.some_random_uuid)) self.assertEqual(uuids.some_random_uuid, self.cleaner.get_image_id( '%s.qcow2' % uuids.some_random_uuid)) self.assertEqual(uuids.some_random_uuid, self.cleaner.get_image_id( '%s.uc' % uuids.some_random_uuid)) self.assertEqual(uuids.some_random_uuid, self.cleaner.get_image_id( '%s.blah' % uuids.some_random_uuid)) self.assertIsNone(self.cleaner.get_image_id('foo')) self.assertIsNone(self.cleaner.get_image_id('foo.bar')) def test_is_valid_image(self): image = self.db.image_create(self.context, {'status': 'queued'}) self.assertTrue(self.cleaner.is_valid_image(image['id'])) self.assertFalse(self.cleaner.is_valid_image('foo')) def test_is_valid_image_deleted(self): image = self.db.image_create(self.context, {'status': 'queued'}) self.db.image_destroy(self.context, image['id']) self.assertFalse(self.cleaner.is_valid_image(image['id'])) @mock.patch('os.remove') def test_delete_file(self, mock_remove): self.assertTrue(self.cleaner.delete_file('foo')) os.remove.assert_called_once_with('foo') @mock.patch('os.remove') @mock.patch.object(housekeeping, 'LOG') def test_delete_file_not_found(self, mock_LOG, mock_remove): os.remove.side_effect = FileNotFoundError('foo is gone') # We should ignore a file-not-found error self.assertTrue(self.cleaner.delete_file('foo')) os.remove.assert_called_once_with('foo') mock_LOG.error.assert_not_called() @mock.patch('os.remove') @mock.patch.object(housekeeping, 'LOG') def test_delete_file_failed(self, mock_LOG, mock_remove): # Any other error should report failure and log os.remove.side_effect = Exception('insufficient plutonium') self.assertFalse(self.cleaner.delete_file('foo')) os.remove.assert_called_once_with('foo') mock_LOG.error.assert_called_once_with( 'Failed to delete stale staging path %(path)r: %(err)s', {'path': 'foo', 'err': 'insufficient plutonium'}) @mock.patch('os.listdir') @mock.patch('os.remove') @mock.patch.object(housekeeping, 'LOG') def test_clean_orphaned_staging_residue_empty(self, mock_LOG, mock_remove, mock_listdir): mock_listdir.return_value = [] self.cleaner.clean_orphaned_staging_residue() mock_listdir.assert_called_once_with(housekeeping.staging_store_path()) mock_remove.assert_not_called() mock_LOG.assert_not_called() @mock.patch('os.remove') @mock.patch('os.listdir') @mock.patch.object(housekeeping, 'LOG') def test_clean_orphaned_staging_residue(self, mock_LOG, mock_listdir, mock_remove): staging = housekeeping.staging_store_path() image = self.db.image_create(self.context, {'status': 'queued'}) mock_listdir.return_value = ['notanimageid', image['id'], uuids.stale, uuids.midconvert, '%s.qcow2' % uuids.midconvert] self.cleaner.clean_orphaned_staging_residue() # NOTE(danms): We should have deleted the stale image file expected_stale = os.path.join(staging, uuids.stale) # NOTE(danms): We should have deleted the mid-convert base image and # the target file expected_mc = os.path.join(staging, uuids.midconvert) expected_mc_target = os.path.join(staging, '%s.qcow2' % uuids.midconvert) mock_remove.assert_has_calls([ mock.call(expected_stale), mock.call(expected_mc), mock.call(expected_mc_target), ]) # NOTE(danms): We should have cleaned the one (which we os.remove()'d) # above, and ignore the invalid and active ones. No errors this time. 
mock_LOG.debug.assert_has_calls([ mock.call('Found %i files in staging directory for potential ' 'cleanup', 5), mock.call('Staging directory contains unexpected non-image file ' '%r; ignoring', 'notanimageid'), mock.call('Stale staging residue found for image %(uuid)s: ' '%(file)r; deleting now.', {'uuid': uuids.stale, 'file': expected_stale}), mock.call('Stale staging residue found for image %(uuid)s: ' '%(file)r; deleting now.', {'uuid': uuids.midconvert, 'file': expected_mc}), mock.call('Stale staging residue found for image %(uuid)s: ' '%(file)r; deleting now.', {'uuid': uuids.midconvert, 'file': expected_mc_target}), mock.call('Cleaned %(cleaned)i stale staging files, ' '%(ignored)i ignored (%(error)i errors)', {'cleaned': 3, 'ignored': 2, 'error': 0}), ]) @mock.patch('os.listdir') @mock.patch('os.remove') @mock.patch.object(housekeeping, 'LOG') def test_clean_orphaned_staging_residue_handles_errors(self, mock_LOG, mock_remove, mock_listdir): staging = housekeeping.staging_store_path() mock_listdir.return_value = [uuids.gone, uuids.error] mock_remove.side_effect = [FileNotFoundError('gone'), PermissionError('not yours')] self.cleaner.clean_orphaned_staging_residue() # NOTE(danms): We should only have logged an error for the # permission failure mock_LOG.error.assert_called_once_with( 'Failed to delete stale staging path %(path)r: %(err)s', {'path': os.path.join(staging, uuids.error), 'err': 'not yours'}) # NOTE(danms): We should report the permission failure as an error, # but not the already-gone or invalid ones. mock_LOG.debug.assert_has_calls([ mock.call('Found %i files in staging directory for potential ' 'cleanup', 2), mock.call('Stale staging residue found for image %(uuid)s: ' '%(file)r; deleting now.', {'uuid': uuids.gone, 'file': os.path.join(staging, uuids.gone)}), mock.call('Stale staging residue found for image %(uuid)s: ' '%(file)r; deleting now.', {'uuid': uuids.error, 'file': os.path.join(staging, uuids.error)}), mock.call('Cleaned %(cleaned)i stale staging files, ' '%(ignored)i ignored (%(error)i errors)', {'cleaned': 1, 'ignored': 0, 'error': 1}), ]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_image_cache.py0000664000175000017500000007011000000000000022245 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
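# NOTE(editor): The StagingStoreCleaner tests above pin down the staging
# filename convention: residue is either a bare image UUID or
# '<uuid>.<suffix>' (e.g. '<uuid>.qcow2' left behind mid-conversion).
# The sketch below is a hypothetical illustration of that
# filename-to-image-id mapping, assuming only the behavior asserted in
# test_get_image_id; it is not Glance's actual implementation.
import uuid


def _example_get_image_id(filename):
    """Return the image UUID encoded in a staging filename, else None."""
    # Strip at most one extension: '<uuid>.qcow2' and '<uuid>.uc' both
    # reduce to the bare UUID, while 'foo' and 'foo.bar' do not parse.
    candidate = filename.split('.', 1)[0]
    try:
        # uuid.UUID() raises ValueError for anything that is not a UUID.
        return str(uuid.UUID(candidate))
    except ValueError:
        return None


_image_id = str(uuid.uuid4())
assert _example_get_image_id(_image_id) == _image_id
assert _example_get_image_id(_image_id + '.qcow2') == _image_id
assert _example_get_image_id('foo') is None
assert _example_get_image_id('foo.bar') is None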
from contextlib import contextmanager import datetime import errno import io import os import tempfile import time from unittest import mock import fixtures import glance_store as store from oslo_config import cfg from oslo_utils import fileutils from oslo_utils import secretutils from oslo_utils import units from glance import async_ from glance.common import exception from glance import context from glance import gateway as glance_gateway from glance import image_cache from glance.image_cache import prefetcher from glance.tests.unit import utils as unit_test_utils from glance.tests import utils as test_utils from glance.tests.utils import skip_if_disabled from glance.tests.utils import xattr_writes_supported FIXTURE_LENGTH = 1024 FIXTURE_DATA = b'*' * FIXTURE_LENGTH CONF = cfg.CONF class ImageCacheTestCase(object): def _setup_fixture_file(self): FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertFalse(self.cache.is_cached(1)) self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE)) self.assertTrue(self.cache.is_cached(1)) @skip_if_disabled def test_is_cached(self): """Verify is_cached(1) returns False, then add something to the cache and verify is_cached(1) returns True. """ self._setup_fixture_file() @skip_if_disabled def test_read(self): """Verify is_cached(1) returns False, then add something to the cache and verify after a subsequent read from the cache that is_cached(1) returns True. """ self._setup_fixture_file() buff = io.BytesIO() with self.cache.open_for_read(1) as cache_file: for chunk in cache_file: buff.write(chunk) self.assertEqual(FIXTURE_DATA, buff.getvalue()) @skip_if_disabled def test_open_for_read(self): """Test convenience wrapper for opening a cache file via its image identifier. """ self._setup_fixture_file() buff = io.BytesIO() with self.cache.open_for_read(1) as cache_file: for chunk in cache_file: buff.write(chunk) self.assertEqual(FIXTURE_DATA, buff.getvalue()) @skip_if_disabled def test_get_image_size(self): """Test convenience wrapper for querying cache file size via its image identifier.
""" self._setup_fixture_file() size = self.cache.get_image_size(1) self.assertEqual(FIXTURE_LENGTH, size) @skip_if_disabled def test_delete(self): """Test delete method that removes an image from the cache.""" self._setup_fixture_file() self.cache.delete_cached_image(1) self.assertFalse(self.cache.is_cached(1)) @skip_if_disabled def test_delete_all(self): """Test delete method that removes an image from the cache.""" for image_id in (1, 2): self.assertFalse(self.cache.is_cached(image_id)) for image_id in (1, 2): FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(image_id, FIXTURE_FILE)) for image_id in (1, 2): self.assertTrue(self.cache.is_cached(image_id)) self.cache.delete_all_cached_images() for image_id in (1, 2): self.assertFalse(self.cache.is_cached(image_id)) def _test_clean_invalid_path(self, failure=False): invalid_file_path = os.path.join(self.cache_dir, 'invalid', '1') invalid_file = open(invalid_file_path, 'wb') invalid_file.write(FIXTURE_DATA) invalid_file.close() self.assertTrue(os.path.exists(invalid_file_path)) self.delay_inaccurate_clock() if failure: with mock.patch.object( fileutils, 'delete_if_exists') as mock_delete: mock_delete.side_effect = OSError(errno.ENOENT, '') try: self.cache.clean() except OSError: self.assertTrue(os.path.exists(invalid_file_path)) else: self.cache.clean() self.assertFalse(os.path.exists(invalid_file_path)) @skip_if_disabled def test_clean_invalid_path(self): """Test the clean method removes expected image from invalid path.""" self._test_clean_invalid_path() @skip_if_disabled def test_clean_stalled(self): """Test the clean method removes expected images.""" incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', '1') incomplete_file = open(incomplete_file_path, 'wb') incomplete_file.write(FIXTURE_DATA) incomplete_file.close() self.assertTrue(os.path.exists(incomplete_file_path)) self.delay_inaccurate_clock() self.cache.clean(stall_time=0) self.assertFalse(os.path.exists(incomplete_file_path)) def _test_clean_stall_time(self, stall_time=None, days=2, stall_failed=False): """ Test the clean method removes the stalled images as expected """ incomplete_file_path_1 = os.path.join(self.cache_dir, 'incomplete', '1') incomplete_file_path_2 = os.path.join(self.cache_dir, 'incomplete', '2') for f in (incomplete_file_path_1, incomplete_file_path_2): incomplete_file = open(f, 'wb') incomplete_file.write(FIXTURE_DATA) incomplete_file.close() mtime = os.path.getmtime(incomplete_file_path_1) pastday = (datetime.datetime.fromtimestamp(mtime) - datetime.timedelta(days=days)) atime = int(time.mktime(pastday.timetuple())) mtime = atime os.utime(incomplete_file_path_1, (atime, mtime)) self.assertTrue(os.path.exists(incomplete_file_path_1)) self.assertTrue(os.path.exists(incomplete_file_path_2)) # If stall_time is None then it will wait for default time # of `image_cache_stall_time` which is 24 hours if stall_failed: with mock.patch.object( fileutils, 'delete_if_exists') as mock_delete: mock_delete.side_effect = OSError(errno.ENOENT, '') self.cache.clean(stall_time=stall_time) self.assertTrue(os.path.exists(incomplete_file_path_1)) else: self.cache.clean(stall_time=stall_time) self.assertFalse(os.path.exists(incomplete_file_path_1)) self.assertTrue(os.path.exists(incomplete_file_path_2)) @skip_if_disabled def test_clean_stalled_none_stall_time(self): self._test_clean_stall_time() @skip_if_disabled def test_clean_stalled_nonzero_stall_time(self): """Test the clean method removes expected images.""" 
self._test_clean_stall_time(stall_time=3600, days=1) @skip_if_disabled def test_prune(self): """ Test that pruning the cache works as expected... """ self.assertEqual(0, self.cache.get_cache_size()) # Add a bunch of images to the cache. The max cache size for the cache # is set to 5KB and each image is 1K. We use 11 images in this test. # The first 10 are added to and retrieved from cache in the same order. # Then, the 11th image is added to cache but not retrieved before we # prune. We should see only 5 images left after pruning, and the # images that are least recently accessed should be the ones pruned... for x in range(10): FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(x, FIXTURE_FILE)) self.assertEqual(10 * units.Ki, self.cache.get_cache_size()) # OK, hit the images that are now cached... for x in range(10): buff = io.BytesIO() with self.cache.open_for_read(x) as cache_file: for chunk in cache_file: buff.write(chunk) # Add a new image to cache. # This is specifically to test the bug: 1438564 FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(99, FIXTURE_FILE)) self.cache.prune() self.assertEqual(5 * units.Ki, self.cache.get_cache_size()) # Ensure images 0, 1, 2, 3, 4 & 5 are not cached anymore for x in range(0, 6): self.assertFalse(self.cache.is_cached(x), "Image %s was cached!" % x) # Ensure images 6, 7, 8 and 9 are still cached for x in range(6, 10): self.assertTrue(self.cache.is_cached(x), "Image %s was not cached!" % x) # Ensure the newly added image, 99, is still cached self.assertTrue(self.cache.is_cached(99), "Image 99 was not cached!") @skip_if_disabled def test_prune_to_zero(self): """Test that an image_cache_max_size of 0 doesn't kill the pruner This is a test specifically for LP #1039854 """ self.assertEqual(0, self.cache.get_cache_size()) FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file('xxx', FIXTURE_FILE)) self.assertEqual(1024, self.cache.get_cache_size()) # OK, hit the image that is now cached... buff = io.BytesIO() with self.cache.open_for_read('xxx') as cache_file: for chunk in cache_file: buff.write(chunk) self.config(image_cache_max_size=0) self.cache.prune() self.assertEqual(0, self.cache.get_cache_size()) self.assertFalse(self.cache.is_cached('xxx')) @skip_if_disabled def test_queue(self): """ Test that queueing works properly """ self.assertFalse(self.cache.is_cached(1)) self.assertFalse(self.cache.is_queued(1)) FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.queue_image(1)) self.assertTrue(self.cache.is_queued(1)) self.assertFalse(self.cache.is_cached(1)) # Should not return True if the image is already # queued for caching... 
self.assertFalse(self.cache.queue_image(1)) self.assertFalse(self.cache.is_cached(1)) # Test that we return False if we try to queue # an image that has already been cached self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE)) self.assertFalse(self.cache.is_queued(1)) self.assertTrue(self.cache.is_cached(1)) self.assertFalse(self.cache.queue_image(1)) self.cache.delete_cached_image(1) # Test that we return false if image is being cached incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', '1') incomplete_file = open(incomplete_file_path, 'wb') incomplete_file.write(FIXTURE_DATA) incomplete_file.close() self.assertFalse(self.cache.is_queued(1)) self.assertFalse(self.cache.is_cached(1)) self.assertTrue(self.cache.driver.is_being_cached(1)) self.assertFalse(self.cache.queue_image(1)) self.cache.clean(stall_time=0) for x in range(3): self.assertTrue(self.cache.queue_image(x)) self.assertEqual(['0', '1', '2'], self.cache.get_queued_images()) @skip_if_disabled def test_open_for_write_good(self): """ Test to see if open_for_write works in normal case """ # test a good case image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) with self.cache.driver.open_for_write(image_id) as cache_file: cache_file.write(b'a') self.assertTrue(self.cache.is_cached(image_id), "Image %s was NOT cached!" % image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertFalse(os.path.exists(invalid_file_path)) @skip_if_disabled def test_open_for_write_with_exception(self): """ Test to see if open_for_write works in a failure case for each driver This case is where an exception is raised while the file is being written. The image is partially filled in cache and filling won't resume so verify the image is moved to invalid/ directory """ # test a case where an exception is raised while the file is open image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) try: with self.cache.driver.open_for_write(image_id): raise IOError except Exception as e: self.assertIsInstance(e, IOError) self.assertFalse(self.cache.is_cached(image_id), "Image %s was cached!" % image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertTrue(os.path.exists(invalid_file_path)) @skip_if_disabled def test_caching_iterator(self): """ Test to see if the caching iterator interacts properly with the driver When the iterator completes going through the data the driver should have closed the image and placed it correctly """ # test a case where an exception NOT raised while the file is open, # and a consuming iterator completes def consume(image_id): data = [b'a', b'b', b'c', b'd', b'e', b'f'] checksum = None caching_iter = self.cache.get_caching_iter(image_id, checksum, iter(data)) self.assertEqual(data, list(caching_iter)) image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) consume(image_id) self.assertTrue(self.cache.is_cached(image_id), "Image %s was NOT cached!" 
% image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertFalse(os.path.exists(invalid_file_path)) @skip_if_disabled def test_caching_iterator_handles_backend_failure(self): """ Test that when the backend fails, caching_iter does not continue trying to consume data, and rolls back the cache. """ def faulty_backend(): data = [b'a', b'b', b'c', b'Fail', b'd', b'e', b'f'] for d in data: if d == b'Fail': raise exception.GlanceException('Backend failure') yield d def consume(image_id): caching_iter = self.cache.get_caching_iter(image_id, None, faulty_backend()) # exercise the caching_iter list(caching_iter) image_id = '1' self.assertRaises(exception.GlanceException, consume, image_id) # make sure bad image was not cached self.assertFalse(self.cache.is_cached(image_id)) @skip_if_disabled def test_caching_iterator_falloffend(self): """ Test to see if the caching iterator interacts properly with the driver in a case where the iterator is only partially consumed. In this case the image is only partially filled in cache and filling won't resume. When the iterator goes out of scope the driver should have closed the image and moved it from incomplete/ to invalid/ """ # test a case where a consuming iterator just stops. def falloffend(image_id): data = [b'a', b'b', b'c', b'd', b'e', b'f'] checksum = None caching_iter = self.cache.get_caching_iter(image_id, checksum, iter(data)) self.assertEqual(b'a', next(caching_iter)) image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) falloffend(image_id) self.assertFalse(self.cache.is_cached(image_id), "Image %s was cached!" % image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertTrue(os.path.exists(invalid_file_path)) @skip_if_disabled def test_gate_caching_iter_good_checksum(self): image = b"12345678990abcdefghijklmnop" image_id = 123 md5 = secretutils.md5(usedforsecurity=False) md5.update(image) checksum = md5.hexdigest() with mock.patch('glance.db.get_api') as mock_get_db: db = unit_test_utils.FakeDB(initialize=False) mock_get_db.return_value = db cache = image_cache.ImageCache() img_iter = cache.get_caching_iter(image_id, checksum, [image]) for chunk in img_iter: pass # checksum is valid, fake image should be cached: self.assertTrue(cache.is_cached(image_id)) @skip_if_disabled def test_gate_caching_iter_bad_checksum(self): image = b"12345678990abcdefghijklmnop" image_id = 123 checksum = "foobar" # bad. 
with mock.patch('glance.db.get_api') as mock_get_db: db = unit_test_utils.FakeDB(initialize=False) mock_get_db.return_value = db cache = image_cache.ImageCache() img_iter = cache.get_caching_iter(image_id, checksum, [image]) def reader(): for chunk in img_iter: pass self.assertRaises(exception.GlanceException, reader) # checksum is invalid, caching will fail: self.assertFalse(cache.is_cached(image_id)) class TestImageCacheXattr(test_utils.BaseTestCase, ImageCacheTestCase): """Tests image caching when xattr is used in cache""" def setUp(self): """ Test to see if the pre-requisites for the image cache are working (python-xattr installed and xattr support on the filesystem) """ super(TestImageCacheXattr, self).setUp() if getattr(self, 'disable', False): return self.cache_dir = self.useFixture(fixtures.TempDir()).path if not getattr(self, 'inited', False): try: import xattr # noqa except ImportError: self.inited = True self.disabled = True self.disabled_message = ("python-xattr not installed.") return self.inited = True self.disabled = False self.config(image_cache_dir=self.cache_dir, image_cache_driver='xattr', image_cache_max_size=5 * units.Ki) self.cache = image_cache.ImageCache() if not xattr_writes_supported(self.cache_dir): self.inited = True self.disabled = True self.disabled_message = ("filesystem does not support xattr") return class TestImageCacheCentralizedDb(test_utils.BaseTestCase, ImageCacheTestCase): """Tests image caching when Centralized DB is used in cache""" def setUp(self): super(TestImageCacheCentralizedDb, self).setUp() self.inited = True self.disabled = False self.cache_dir = self.useFixture(fixtures.TempDir()).path self.config(image_cache_dir=self.cache_dir, image_cache_driver='centralized_db', image_cache_max_size=5 * units.Ki, worker_self_reference_url='http://workerx') with mock.patch('glance.db.get_api') as mock_get_db: self.db = unit_test_utils.FakeDB(initialize=False) mock_get_db.return_value = self.db self.cache = image_cache.ImageCache() def test_node_reference_create_duplicate(self): with mock.patch('glance.db.get_api') as mock_get_db: self.db = unit_test_utils.FakeDB(initialize=False) mock_get_db.return_value = self.db with mock.patch.object( self.db, 'node_reference_create') as mock_node_create: mock_node_create.side_effect = exception.Duplicate with mock.patch.object( image_cache.drivers.centralized_db, 'LOG') as mock_log: image_cache.ImageCache() expected_calls = [ mock.call('Node reference is already recorded, ' 'ignoring it') ] mock_log.debug.assert_has_calls(expected_calls) def test_get_least_recently_accessed_os_error(self): self.assertEqual(0, self.cache.get_cache_size()) for x in range(10): FIXTURE_FILE = io.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(x, FIXTURE_FILE)) self.assertEqual(10 * units.Ki, self.cache.get_cache_size()) with mock.patch.object(os, 'stat') as mock_stat: mock_stat.side_effect = OSError image_id, size = self.cache.driver.get_least_recently_accessed() self.assertEqual(0, size) @skip_if_disabled def test_clean_stalled_fails(self): """Test the clean method fails to delete file, ignores the failure""" self._test_clean_stall_time(stall_time=3600, days=1, stall_failed=True) @skip_if_disabled def test_clean_invalid_path_fails(self): """Test the clean method fails to remove image from invalid path.""" self._test_clean_invalid_path(failure=True) class TestImageCacheSqlite(test_utils.BaseTestCase, ImageCacheTestCase): """Tests image caching when SQLite is used in cache""" def setUp(self): """ Test to see if the 
pre-requisites for the image cache are working (python-sqlite3 installed) """ super(TestImageCacheSqlite, self).setUp() if getattr(self, 'disable', False): return if not getattr(self, 'inited', False): try: import sqlite3 # noqa except ImportError: self.inited = True self.disabled = True self.disabled_message = ("python-sqlite3 not installed.") return self.inited = True self.disabled = False self.cache_dir = self.useFixture(fixtures.TempDir()).path self.config(image_cache_dir=self.cache_dir, image_cache_driver='sqlite', image_cache_max_size=5 * units.Ki) self.cache = image_cache.ImageCache() @mock.patch('glance.db.get_api') def _test_prefetcher(self, mock_get_db): self.config(enabled_backends={'cheap': 'file'}) store.register_store_opts(CONF) self.config(filesystem_store_datadir='/tmp', group='cheap') store.create_multi_stores(CONF) tempf = tempfile.NamedTemporaryFile() tempf.write(b'foo') db = unit_test_utils.FakeDB(initialize=False) mock_get_db.return_value = db ctx = context.RequestContext(is_admin=True, roles=['admin']) gateway = glance_gateway.Gateway() image_factory = gateway.get_image_factory(ctx) image_repo = gateway.get_repo(ctx) fetcher = prefetcher.Prefetcher() # Create an image with no values set and queue it image = image_factory.new_image() image_repo.add(image) fetcher.cache.queue_image(image.image_id) # Image is not active, so it should fail to cache, but remain queued self.assertFalse(fetcher.run()) self.assertFalse(fetcher.cache.is_cached(image.image_id)) self.assertTrue(fetcher.cache.is_queued(image.image_id)) # Set the disk/container format and give it a location image.disk_format = 'raw' image.container_format = 'bare' image.status = 'active' loc = {'url': 'file://%s' % tempf.name, 'metadata': {'store': 'cheap'}} with mock.patch('glance.location._check_image_location'): # FIXME(danms): Why do I have to do this? 
image.locations = [loc] image_repo.save(image) # Image is now active and has a location, so it should cache self.assertTrue(fetcher.run()) self.assertTrue(fetcher.cache.is_cached(image.image_id)) self.assertFalse(fetcher.cache.is_queued(image.image_id)) @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) def test_prefetcher_greenthread(self): async_.set_threadpool_model('eventlet') self._test_prefetcher() @mock.patch('glance.async_._THREADPOOL_MODEL', new=None) def test_prefetcher_native(self): async_.set_threadpool_model('native') self._test_prefetcher() class TestImageCacheNoDep(test_utils.BaseTestCase): def setUp(self): super(TestImageCacheNoDep, self).setUp() self.driver = None def init_driver(self2): self2.driver = self.driver self.mock_object(image_cache.ImageCache, 'init_driver', init_driver) def test_get_caching_iter_when_write_fails(self): class FailingFile(object): def write(self, data): if data == "Fail": raise IOError class FailingFileDriver(object): def is_cacheable(self, *args, **kwargs): return True @contextmanager def open_for_write(self, *args, **kwargs): yield FailingFile() self.driver = FailingFileDriver() cache = image_cache.ImageCache() data = [b'a', b'b', b'c', b'Fail', b'd', b'e', b'f'] caching_iter = cache.get_caching_iter('dummy_id', None, iter(data)) self.assertEqual(data, list(caching_iter)) def test_get_caching_iter_when_open_fails(self): class OpenFailingDriver(object): def is_cacheable(self, *args, **kwargs): return True @contextmanager def open_for_write(self, *args, **kwargs): raise IOError self.driver = OpenFailingDriver() cache = image_cache.ImageCache() data = [b'a', b'b', b'c', b'd', b'e', b'f'] caching_iter = cache.get_caching_iter('dummy_id', None, iter(data)) self.assertEqual(data, list(caching_iter)) class TestImagePrefetcher(test_utils.BaseTestCase): def setUp(self): super(TestImagePrefetcher, self).setUp() self.cache_dir = self.useFixture(fixtures.TempDir()).path self.config(image_cache_dir=self.cache_dir, image_cache_driver='xattr', image_cache_max_size=5 * units.Ki) self.prefetcher = prefetcher.Prefetcher() def test_fetch_image_into_cache_without_auth(self): with mock.patch.object(self.prefetcher.gateway, 'get_repo') as mock_get: self.prefetcher.fetch_image_into_cache('fake-image-id') mock_get.assert_called_once_with(mock.ANY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_manage.py0000664000175000017500000010227100000000000021274 0ustar00zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
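# NOTE(editor): test_prune above asserts least-recently-accessed
# eviction: with image_cache_max_size at 5 KiB and eleven 1 KiB entries,
# pruning keeps only the five most recently read images. Below is a
# minimal, hypothetical sketch of that policy over a plain mapping of
# image id to size; it is not the glance.image_cache driver API.
from collections import OrderedDict


def _example_prune(entries, max_size):
    """Evict oldest-accessed entries until the total fits in max_size.

    ``entries`` is an OrderedDict of image_id -> size ordered from least
    to most recently accessed; returns the list of evicted ids.
    """
    evicted = []
    total = sum(entries.values())
    while total > max_size and entries:
        image_id, size = entries.popitem(last=False)  # drop the oldest
        total -= size
        evicted.append(image_id)
    return evicted


_cache = OrderedDict((x, 1024) for x in range(10))
_cache[99] = 1024  # newly cached and most recently touched
assert _example_prune(_cache, 5 * 1024) == [0, 1, 2, 3, 4, 5]
assert sorted(_cache) == [6, 7, 8, 9, 99]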
import io from unittest import mock import fixtures from glance.cmd import manage from glance.common import exception from glance.db.sqlalchemy import alembic_migrations from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import metadata as db_metadata from glance.tests import utils as test_utils from sqlalchemy.engine.url import make_url as sqlalchemy_make_url class TestManageBase(test_utils.BaseTestCase): def setUp(self): super(TestManageBase, self).setUp() def clear_conf(): manage.CONF.reset() manage.CONF.unregister_opt(manage.command_opt) clear_conf() self.addCleanup(clear_conf) self.useFixture(fixtures.MonkeyPatch( 'oslo_log.log.setup', lambda product_name, version='test': None)) patcher = mock.patch('glance.db.sqlalchemy.api.get_engine') patcher.start() self.addCleanup(patcher.stop) def _main_test_helper(self, argv, func_name=None, *exp_args, **exp_kwargs): self.useFixture(fixtures.MonkeyPatch('sys.argv', argv)) manage.main() func_name.assert_called_once_with(*exp_args, **exp_kwargs) class TestLegacyManage(TestManageBase): @mock.patch.object(manage.DbCommands, 'version') def test_legacy_db_version(self, db_upgrade): self._main_test_helper(['glance.cmd.manage', 'db_version'], manage.DbCommands.version) @mock.patch.object(manage.DbCommands, 'sync') def test_legacy_db_sync(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_sync'], manage.DbCommands.sync, None) @mock.patch.object(manage.DbCommands, 'upgrade') def test_legacy_db_upgrade(self, db_upgrade): self._main_test_helper(['glance.cmd.manage', 'db_upgrade'], manage.DbCommands.upgrade, None) @mock.patch.object(manage.DbCommands, 'version_control') def test_legacy_db_version_control(self, db_version_control): self._main_test_helper(['glance.cmd.manage', 'db_version_control'], manage.DbCommands.version_control, None) @mock.patch.object(manage.DbCommands, 'sync') def test_legacy_db_sync_version(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_sync', 'liberty'], manage.DbCommands.sync, 'liberty') @mock.patch.object(manage.DbCommands, 'upgrade') def test_legacy_db_upgrade_version(self, db_upgrade): self._main_test_helper(['glance.cmd.manage', 'db_upgrade', 'liberty'], manage.DbCommands.upgrade, 'liberty') @mock.patch.object(manage.DbCommands, 'expand') def test_legacy_db_expand(self, db_expand): self._main_test_helper(['glance.cmd.manage', 'db_expand'], manage.DbCommands.expand) @mock.patch.object(manage.DbCommands, 'migrate') def test_legacy_db_migrate(self, db_migrate): self._main_test_helper(['glance.cmd.manage', 'db_migrate'], manage.DbCommands.migrate) @mock.patch.object(manage.DbCommands, 'contract') def test_legacy_db_contract(self, db_contract): self._main_test_helper(['glance.cmd.manage', 'db_contract'], manage.DbCommands.contract) def test_db_metadefs_unload(self): db_metadata.db_unload_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_unload_metadefs'], db_metadata.db_unload_metadefs, db_api.get_engine()) def test_db_metadefs_load(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs'], db_metadata.db_load_metadefs, db_api.get_engine(), None, None, None, None) def test_db_metadefs_load_with_specified_path(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', None, None, None) def test_db_metadefs_load_from_path_merge(self): db_metadata.db_load_metadefs = mock.Mock() 
self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/', 'True'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', 'True', None, None) def test_db_metadefs_load_from_merge_and_prefer_new(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/', 'True', 'True'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', 'True', 'True', None) def test_db_metadefs_load_from_merge_and_prefer_new_and_overwrite(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/', 'True', 'True', 'True'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', 'True', 'True', 'True') def test_db_metadefs_export(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs'], db_metadata.db_export_metadefs, db_api.get_engine(), None) def test_db_metadefs_export_with_specified_path(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs', '/mock/'], db_metadata.db_export_metadefs, db_api.get_engine(), '/mock/') class TestManage(TestManageBase): def setUp(self): super(TestManage, self).setUp() self.db = manage.DbCommands() self.output = io.StringIO() self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output)) def test_db_complex_password(self): engine = mock.Mock() # See comments in get_alembic_config; make an engine url with # password characters that will be escaped, to ensure the # resulting value makes it into alembic unaltered. engine.url = sqlalchemy_make_url( 'mysql+pymysql://username:pw@%/!#$()@host:1234/dbname') alembic_config = alembic_migrations.get_alembic_config(engine) self.assertEqual(str(engine.url), alembic_config.get_main_option('sqlalchemy.url')) @mock.patch('glance.db.sqlalchemy.api.get_engine') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.data_migrations.' 'has_pending_migrations') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') def test_db_check_result(self, mock_get_alembic_branch_head, mock_get_current_alembic_heads, mock_has_pending_migrations, get_mock_engine): get_mock_engine.return_value = mock.Mock() engine = get_mock_engine.return_value engine.engine.name = 'postgresql' exit = self.assertRaises(SystemExit, self.db.check) self.assertIn('Rolling upgrades are currently supported only for ' 'MySQL and Sqlite', exit.code) engine = get_mock_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.return_value = 'pike_expand01' exit = self.assertRaises(SystemExit, self.db.check) self.assertEqual(3, exit.code) self.assertIn('Your database is not up to date. ' 'Your first step is to run `glance-manage db expand`.', self.output.getvalue()) mock_get_current_alembic_heads.return_value = ['pike_expand01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', None] mock_has_pending_migrations.return_value = [mock.Mock()] exit = self.assertRaises(SystemExit, self.db.check) self.assertEqual(4, exit.code) self.assertIn('Your database is not up to date. 
' 'Your next step is to run `glance-manage db migrate`.', self.output.getvalue()) mock_get_current_alembic_heads.return_value = ['pike_expand01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] mock_has_pending_migrations.return_value = None exit = self.assertRaises(SystemExit, self.db.check) self.assertEqual(5, exit.code) self.assertIn('Your database is not up to date. ' 'Your next step is to run `glance-manage db contract`.', self.output.getvalue()) mock_get_current_alembic_heads.return_value = ['pike_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] mock_has_pending_migrations.return_value = None self.assertRaises(SystemExit, self.db.check) self.assertIn('Database is up to date. No upgrades needed.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, 'expand') @mock.patch.object(manage.DbCommands, 'migrate') @mock.patch.object(manage.DbCommands, 'contract') def test_sync(self, mock_contract, mock_migrate, mock_expand, mock_get_alembic_branch_head, mock_get_current_alembic_heads): mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.return_value = ['pike_contract01'] self.db.sync() mock_expand.assert_called_once_with(online_migration=False) mock_migrate.assert_called_once_with(online_migration=False) mock_contract.assert_called_once_with(online_migration=False) self.assertIn('Database is synced successfully.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch('alembic.command.upgrade') def test_sync_db_is_already_sync(self, mock_upgrade, mock_get_alembic_branch_head, mock_get_current_alembic_heads): mock_get_current_alembic_heads.return_value = ['pike_contract01'] mock_get_alembic_branch_head.return_value = ['pike_contract01'] self.assertRaises(SystemExit, self.db.sync) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') @mock.patch.object(manage.DbCommands, 'expand') def test_sync_failed_to_sync(self, mock_expand, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', ''] mock_expand.side_effect = exception.GlanceException exit = self.assertRaises(SystemExit, self.db.sync) self.assertIn('Failed to sync database: ERROR:', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') @mock.patch.object(manage.DbCommands, '_sync') def test_expand(self, mock_sync, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.side_effect = ['ocata_contract01', 'pike_expand01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] self.db.expand() 
mock_sync.assert_called_once_with(version='pike_expand01') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_expand_if_not_expand_head(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.return_value = [] exit = self.assertRaises(SystemExit, self.db.expand) self.assertIn('Database expansion failed. Couldn\'t find head ' 'revision of expand branch.', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_expand_db_is_already_sync(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['pike_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] self.assertRaises(SystemExit, self.db.expand) self.assertIn('Database is up to date. No migrations needed.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_expand_already_sync(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['pike_expand01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] self.db.expand() self.assertIn('Database expansion is up to date. ' 'No expansion needed.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') @mock.patch.object(manage.DbCommands, '_sync') def test_expand_failed(self, mock_sync, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.side_effect = ['ocata_contract01', 'test'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] exit = self.assertRaises(SystemExit, self.db.expand) mock_sync.assert_called_once_with(version='pike_expand01') self.assertIn('Database expansion failed. Database expansion should ' 'have brought the database version up to "pike_expand01"' ' revision. But, current revisions are: test ', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.data_migrations.' 
'has_pending_migrations') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') @mock.patch.object(manage.DbCommands, '_sync') def test_contract(self, mock_sync, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads, mock_has_pending_migrations): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.side_effect = ['pike_expand01', 'pike_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] mock_has_pending_migrations.return_value = False self.db.contract() mock_sync.assert_called_once_with(version='pike_contract01') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_contract_if_not_contract_head(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.return_value = [] exit = self.assertRaises(SystemExit, self.db.contract) self.assertIn('Database contraction failed. Couldn\'t find head ' 'revision of contract branch.', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_contract_db_is_already_sync(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['pike_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] self.assertRaises(SystemExit, self.db.contract) self.assertIn('Database is up to date. No migrations needed.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_contract_before_expand(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_expand01', 'pike_contract01'] exit = self.assertRaises(SystemExit, self.db.contract) self.assertIn('Database contraction did not run. Database ' 'contraction cannot be run before database expansion. ' 'Run database expansion first using "glance-manage db ' 'expand"', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.data_migrations.' 
'has_pending_migrations') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_contract_before_migrate(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_curr_alembic_heads, mock_has_pending_migrations): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_curr_alembic_heads.side_effect = ['pike_expand01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] mock_has_pending_migrations.return_value = [mock.Mock()] exit = self.assertRaises(SystemExit, self.db.contract) self.assertIn('Database contraction did not run. Database ' 'contraction cannot be run before data migration is ' 'complete. Run data migration using "glance-manage db ' 'migrate".', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.data_migrations.' 'has_pending_migrations') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_migrate(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads, mock_has_pending_migrations): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.side_effect = ['pike_expand01', 'pike_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] mock_has_pending_migrations.return_value = None self.db.migrate() self.assertIn('Database migration is up to date. ' 'No migration needed.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_migrate_db_is_already_sync(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['pike_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] self.assertRaises(SystemExit, self.db.migrate) self.assertIn('Database is up to date. No migrations needed.', self.output.getvalue()) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_migrate_already_sync(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['ocata_contract01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] exit = self.assertRaises(SystemExit, self.db.migrate) self.assertIn('Data migration did not run. Data migration cannot be ' 'run before database expansion. Run database expansion ' 'first using "glance-manage db expand"', exit.code) @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.data_migrations.' 
'has_pending_migrations') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_current_alembic_heads') @mock.patch( 'glance.db.sqlalchemy.alembic_migrations.get_alembic_branch_head') @mock.patch.object(manage.DbCommands, '_validate_engine') def test_migrate_before_expand(self, mock_validate_engine, mock_get_alembic_branch_head, mock_get_current_alembic_heads, mock_has_pending_migrations): engine = mock_validate_engine.return_value engine.engine.name = 'mysql' mock_get_current_alembic_heads.return_value = ['pike_expand01'] mock_get_alembic_branch_head.side_effect = ['pike_contract01', 'pike_expand01'] mock_has_pending_migrations.return_value = None self.db.migrate() self.assertIn('Database migration is up to date. ' 'No migration needed.', self.output.getvalue()) @mock.patch.object(manage.DbCommands, 'version') def test_db_version(self, version): self._main_test_helper(['glance.cmd.manage', 'db', 'version'], manage.DbCommands.version) @mock.patch.object(manage.DbCommands, 'check') def test_db_check(self, check): self._main_test_helper(['glance.cmd.manage', 'db', 'check'], manage.DbCommands.check) @mock.patch.object(manage.DbCommands, 'sync') def test_db_sync(self, sync): self._main_test_helper(['glance.cmd.manage', 'db', 'sync'], manage.DbCommands.sync) @mock.patch.object(manage.DbCommands, 'upgrade') def test_db_upgrade(self, upgrade): self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade'], manage.DbCommands.upgrade) @mock.patch.object(manage.DbCommands, 'version_control') def test_db_version_control(self, version_control): self._main_test_helper(['glance.cmd.manage', 'db', 'version_control'], manage.DbCommands.version_control) @mock.patch.object(manage.DbCommands, 'sync') def test_db_sync_version(self, sync): self._main_test_helper(['glance.cmd.manage', 'db', 'sync', 'liberty'], manage.DbCommands.sync, 'liberty') @mock.patch.object(manage.DbCommands, 'upgrade') def test_db_upgrade_version(self, upgrade): self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade', 'liberty'], manage.DbCommands.upgrade, 'liberty') @mock.patch.object(manage.DbCommands, 'expand') def test_db_expand(self, expand): self._main_test_helper(['glance.cmd.manage', 'db', 'expand'], manage.DbCommands.expand) @mock.patch.object(manage.DbCommands, 'migrate') def test_db_migrate(self, migrate): self._main_test_helper(['glance.cmd.manage', 'db', 'migrate'], manage.DbCommands.migrate) @mock.patch.object(manage.DbCommands, 'contract') def test_db_contract(self, contract): self._main_test_helper(['glance.cmd.manage', 'db', 'contract'], manage.DbCommands.contract) def test_db_metadefs_unload(self): db_metadata.db_unload_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'unload_metadefs'], db_metadata.db_unload_metadefs, db_api.get_engine()) def test_db_metadefs_load(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs'], db_metadata.db_load_metadefs, db_api.get_engine(), None, False, False, False) def test_db_metadefs_load_with_specified_path(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--path', '/mock/'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', False, False, False) def test_db_metadefs_load_prefer_new_with_path(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--path', '/mock/', '--merge', '--prefer_new'], db_metadata.db_load_metadefs, db_api.get_engine(), 
'/mock/', True, True, False) def test_db_metadefs_load_prefer_new(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--merge', '--prefer_new'], db_metadata.db_load_metadefs, db_api.get_engine(), None, True, True, False) def test_db_metadefs_load_overwrite_existing(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--merge', '--overwrite'], db_metadata.db_load_metadefs, db_api.get_engine(), None, True, False, True) def test_db_metadefs_load_prefer_new_and_overwrite_existing(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--merge', '--prefer_new', '--overwrite'], db_metadata.db_load_metadefs, db_api.get_engine(), None, True, True, True) def test_db_metadefs_load_from_path_overwrite_existing(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--path', '/mock/', '--merge', '--overwrite'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', True, False, True) def test_db_metadefs_export(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs'], db_metadata.db_export_metadefs, db_api.get_engine(), None) def test_db_metadefs_export_with_specified_path(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs', '--path', '/mock/'], db_metadata.db_export_metadefs, db_api.get_engine(), '/mock/') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_misc.py0000664000175000017500000000534000000000000020776 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
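# NOTE(editor): The DbCommands tests above encode the rolling-upgrade
# ordering enforced by glance-manage: `db expand` must run before
# `db migrate`, which must run before `db contract`, and `db check`
# reports the next required step (exit codes 3, 4 and 5 respectively in
# test_db_check_result). A schematic, hypothetical reduction of that
# ordering follows; the real DbCommands logic inspects alembic branch
# heads rather than booleans.
def _example_next_db_step(expanded, migrated, contracted):
    """Return the next glance-manage db step, mirroring `db check`."""
    if not expanded:
        return 'expand'    # check exits 3: run `glance-manage db expand`
    if not migrated:
        return 'migrate'   # check exits 4: run `glance-manage db migrate`
    if not contracted:
        return 'contract'  # check exits 5: run `glance-manage db contract`
    return None            # database is up to date; no upgrades needed


assert _example_next_db_step(False, False, False) == 'expand'
assert _example_next_db_step(True, False, False) == 'migrate'
assert _example_next_db_step(True, True, False) == 'contract'
assert _example_next_db_step(True, True, True) is None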
import os from glance.common import crypt from glance.common import utils from glance.tests import utils as test_utils class UtilsTestCase(test_utils.BaseTestCase): def test_encryption(self): # Check that the original plaintext and the decrypted ciphertext match # Check keys of the three allowed lengths key_list = ["1234567890abcdef", "12345678901234567890abcd", "1234567890abcdef1234567890ABCDEF"] plaintext_list = [''] blocksize = 64 for i in range(3 * blocksize): text = os.urandom(i).decode('latin1') plaintext_list.append(text) for key in key_list: for plaintext in plaintext_list: ciphertext = crypt.urlsafe_encrypt(key, plaintext, blocksize) self.assertIsInstance(ciphertext, str) self.assertNotEqual(ciphertext, plaintext) text = crypt.urlsafe_decrypt(key, ciphertext) self.assertIsInstance(text, str) self.assertEqual(plaintext, text) def test_empty_metadata_headers(self): """Ensure unset metadata is not encoded in HTTP headers""" metadata = { 'foo': 'bar', 'snafu': None, 'bells': 'whistles', 'unset': None, 'empty': '', 'properties': { 'distro': '', 'arch': None, 'user': 'nobody', }, } headers = utils.image_meta_to_http_headers(metadata) self.assertNotIn('x-image-meta-snafu', headers) self.assertNotIn('x-image-meta-unset', headers) self.assertNotIn('x-image-meta-property-arch', headers) self.assertEqual('bar', headers.get('x-image-meta-foo')) self.assertEqual('whistles', headers.get('x-image-meta-bells')) self.assertEqual('', headers.get('x-image-meta-empty')) self.assertEqual('', headers.get('x-image-meta-property-distro')) self.assertEqual('nobody', headers.get('x-image-meta-property-user')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_notifier.py0000664000175000017500000010302600000000000021662 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
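# NOTE(editor): test_empty_metadata_headers above pins down the header
# encoding rules: None values are dropped entirely, empty strings are
# kept, and nested 'properties' map to 'x-image-meta-property-*'. The
# helper below is a hypothetical re-implementation of those rules for
# illustration only, not glance.common.utils.image_meta_to_http_headers.
def _example_meta_to_headers(metadata):
    headers = {}
    for key, value in metadata.items():
        if key == 'properties':
            for pkey, pvalue in value.items():
                if pvalue is not None:  # unset properties are omitted
                    headers['x-image-meta-property-%s' % pkey] = str(pvalue)
        elif value is not None:  # unset top-level keys are omitted
            headers['x-image-meta-%s' % key] = str(value)
    return headers


_headers = _example_meta_to_headers(
    {'foo': 'bar', 'snafu': None, 'empty': '',
     'properties': {'distro': '', 'arch': None}})
assert 'x-image-meta-snafu' not in _headers
assert 'x-image-meta-property-arch' not in _headers
assert _headers['x-image-meta-empty'] == ''
assert _headers['x-image-meta-property-distro'] == ''
assert _headers['x-image-meta-foo'] == 'bar'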
import datetime from unittest import mock import glance_store from oslo_config import cfg import oslo_messaging import webob import glance.async_ from glance.common import exception from glance.common import timeutils import glance.context from glance import notifier import glance.tests.unit.utils as unit_test_utils from glance.tests import utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' class ImageStub(glance.domain.Image): def get_data(self, offset=0, chunk_size=None): return ['01234', '56789'] def set_data(self, data, size, backend=None, set_active=True): for chunk in data: pass class ImageRepoStub(object): def remove(self, *args, **kwargs): return 'image_from_get' def save(self, *args, **kwargs): return 'image_from_save' def add(self, *args, **kwargs): return 'image_from_add' def get(self, *args, **kwargs): return 'image_from_get' def list(self, *args, **kwargs): return ['images_from_list'] class ImageMemberRepoStub(object): def remove(self, *args, **kwargs): return 'image_member_from_remove' def save(self, *args, **kwargs): return 'image_member_from_save' def add(self, *args, **kwargs): return 'image_member_from_add' def get(self, *args, **kwargs): return 'image_member_from_get' def list(self, *args, **kwargs): return ['image_members_from_list'] class TaskStub(glance.domain.TaskStub): def run(self, executor): pass class Task(glance.domain.Task): def succeed(self, result): pass def fail(self, message): pass class TaskRepoStub(object): def remove(self, *args, **kwargs): return 'task_from_remove' def save(self, *args, **kwargs): return 'task_from_save' def add(self, *args, **kwargs): return 'task_from_add' def get_task(self, *args, **kwargs): return 'task_from_get' def list(self, *args, **kwargs): return ['tasks_from_list'] class TestNotifier(utils.BaseTestCase): @mock.patch.object(oslo_messaging, 'Notifier') @mock.patch.object(oslo_messaging, 'get_notification_transport') def _test_load_strategy(self, mock_get_transport, mock_notifier, url, driver): nfier = notifier.Notifier() mock_get_transport.assert_called_with(cfg.CONF) self.assertIsNotNone(nfier._transport) mock_notifier.assert_called_with(nfier._transport, publisher_id='image.localhost') self.assertIsNotNone(nfier._notifier) def test_notifier_load(self): self._test_load_strategy(url=None, driver=None) @mock.patch.object(oslo_messaging, 'set_transport_defaults') def test_set_defaults(self, mock_set_trans_defaults): notifier.set_defaults(control_exchange='foo') mock_set_trans_defaults.assert_called_with('foo') notifier.set_defaults() mock_set_trans_defaults.assert_called_with('glance') class TestImageNotifications(utils.BaseTestCase): """Test Image Notifications work""" def setUp(self): super(TestImageNotifications, self).setUp() self.image = ImageStub( image_id=UUID1, name='image-1', status='active', size=1024, created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, visibility='public', container_format='ami', virtual_size=2048, tags=['one', 'two'], disk_format='ami', min_ram=128, min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91', locations=['http://127.0.0.1']) self.context = glance.context.RequestContext(tenant=TENANT2, user=USER1) self.image_repo_stub = ImageRepoStub() self.notifier = unit_test_utils.FakeNotifier() self.image_repo_proxy = glance.notifier.ImageRepoProxy( self.image_repo_stub, self.context, 
class TestImageNotifications(utils.BaseTestCase):
    """Test Image Notifications work"""

    def setUp(self):
        super(TestImageNotifications, self).setUp()
        self.image = ImageStub(
            image_id=UUID1, name='image-1', status='active', size=1024,
            created_at=DATETIME, updated_at=DATETIME, owner=TENANT1,
            visibility='public', container_format='ami',
            virtual_size=2048, tags=['one', 'two'], disk_format='ami',
            min_ram=128, min_disk=10,
            checksum='ca425b88f047ce8ec45ee90e813ada91',
            locations=['http://127.0.0.1'])
        self.context = glance.context.RequestContext(tenant=TENANT2,
                                                     user=USER1)
        self.image_repo_stub = ImageRepoStub()
        self.notifier = unit_test_utils.FakeNotifier()
        self.image_repo_proxy = glance.notifier.ImageRepoProxy(
            self.image_repo_stub, self.context, self.notifier)
        self.image_proxy = glance.notifier.ImageProxy(
            self.image, self.context, self.notifier)

    def test_image_save_notification(self):
        self.image_repo_proxy.save(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.update', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')

    def test_image_save_notification_disabled(self):
        self.config(disabled_notifications=["image.update"])
        self.image_repo_proxy.save(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_add_notification(self):
        self.image_repo_proxy.add(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.create', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')

    def test_image_add_notification_disabled(self):
        self.config(disabled_notifications=["image.create"])
        self.image_repo_proxy.add(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_delete_notification(self):
        self.image_repo_proxy.remove(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.delete', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertTrue(output_log['payload']['deleted'])
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')

    def test_image_delete_notification_disabled(self):
        self.config(disabled_notifications=['image.delete'])
        self.image_repo_proxy.remove(self.image_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_get(self):
        image = self.image_repo_proxy.get(UUID1)
        self.assertIsInstance(image, glance.notifier.ImageProxy)
        self.assertEqual('image_from_get', image.repo)

    def test_image_list(self):
        images = self.image_repo_proxy.list()
        self.assertIsInstance(images[0], glance.notifier.ImageProxy)
        self.assertEqual('images_from_list', images[0].repo)

    def test_image_get_data_should_call_next_image_get_data(self):
        with mock.patch.object(self.image, 'get_data') as get_data_mock:
            self.image_proxy.get_data()
            self.assertTrue(get_data_mock.called)

    def test_image_get_data_notification(self):
        self.image_proxy.size = 10
        data = ''.join(self.image_proxy.get_data())
        self.assertEqual('0123456789', data)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.send', output_log['event_type'])
        self.assertEqual(self.image.image_id,
                         output_log['payload']['image_id'])
        self.assertEqual(TENANT2, output_log['payload']['receiver_tenant_id'])
        self.assertEqual(USER1, output_log['payload']['receiver_user_id'])
        self.assertEqual(10, output_log['payload']['bytes_sent'])
        self.assertEqual(TENANT1, output_log['payload']['owner_id'])
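    # The disabled_notifications option exercised by the *_disabled tests
    # below is an ordinary glance-api.conf setting; a sketch of how an
    # operator would suppress these events (values illustrative):
    #
    #     [DEFAULT]
    #     disabled_notifications = image.update,image.send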
    def test_image_get_data_notification_disabled(self):
        self.config(disabled_notifications=['image.send'])
        self.image_proxy.size = 10
        data = ''.join(self.image_proxy.get_data())
        self.assertEqual('0123456789', data)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_get_data_size_mismatch(self):
        self.image_proxy.size = 11
        list(self.image_proxy.get_data())
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.send', output_log['event_type'])
        self.assertEqual(self.image.image_id,
                         output_log['payload']['image_id'])

    def test_image_set_data_prepare_notification(self):
        insurance = {'called': False}

        def data_iterator():
            output_logs = self.notifier.get_logs()
            self.assertEqual(1, len(output_logs))
            output_log = output_logs[0]
            self.assertEqual('INFO', output_log['notification_type'])
            self.assertEqual('image.prepare', output_log['event_type'])
            self.assertEqual(self.image.image_id, output_log['payload']['id'])
            self.assertEqual(['store1', 'store2'],
                             output_log['payload'][
                                 'os_glance_importing_to_stores'])
            self.assertEqual([],
                             output_log['payload']['os_glance_failed_import'])
            yield 'abcd'
            yield 'efgh'
            insurance['called'] = True

        self.image_proxy.extra_properties[
            'os_glance_importing_to_stores'] = 'store1,store2'
        self.image_proxy.extra_properties['os_glance_failed_import'] = ''
        self.image_proxy.set_data(data_iterator(), 8)
        self.assertTrue(insurance['called'])

    def test_image_set_data_prepare_notification_disabled(self):
        insurance = {'called': False}

        def data_iterator():
            output_logs = self.notifier.get_logs()
            self.assertEqual(0, len(output_logs))
            yield 'abcd'
            yield 'efgh'
            insurance['called'] = True

        self.config(disabled_notifications=['image.prepare'])
        self.image_proxy.set_data(data_iterator(), 8)
        self.assertTrue(insurance['called'])

    def test_image_set_data_upload_and_activate_notification(self):
        image = ImageStub(image_id=UUID1, name='image-1', status='queued',
                          created_at=DATETIME, updated_at=DATETIME,
                          owner=TENANT1, visibility='public')
        context = glance.context.RequestContext(tenant=TENANT2, user=USER1)
        fake_notifier = unit_test_utils.FakeNotifier()
        image_proxy = glance.notifier.ImageProxy(image, context,
                                                 fake_notifier)

        def data_iterator():
            fake_notifier.log = []
            yield 'abcde'
            yield 'fghij'
            image_proxy.extra_properties[
                'os_glance_importing_to_stores'] = 'store2'

        image_proxy.extra_properties[
            'os_glance_importing_to_stores'] = 'store1,store2'
        image_proxy.extra_properties['os_glance_failed_import'] = ''
        image_proxy.set_data(data_iterator(), 10)

        output_logs = fake_notifier.get_logs()
        self.assertEqual(2, len(output_logs))

        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertEqual(['store2'],
                         output_log['payload'][
                             'os_glance_importing_to_stores'])
        self.assertEqual([],
                         output_log['payload']['os_glance_failed_import'])

        output_log = output_logs[1]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.activate', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
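    # As the prepare/upload tests above demonstrate, a successful upload
    # through set_data() emits up to three events in order: image.prepare
    # (before any data is consumed), image.upload (after the bytes are
    # stored) and image.activate (once the image goes active; the next
    # test shows it is skipped while stores remain to be imported).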
    def test_image_set_data_upload_and_not_activate_notification(self):
        insurance = {'called': False}

        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            yield 'fghij'
            self.image_proxy.extra_properties[
                'os_glance_importing_to_stores'] = 'store2'
            insurance['called'] = True

        self.image_proxy.set_data(data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertEqual(self.image.image_id, output_log['payload']['id'])
        self.assertTrue(insurance['called'])

    def test_image_set_data_upload_and_activate_notification_disabled(self):
        insurance = {'called': False}
        image = ImageStub(image_id=UUID1, name='image-1', status='queued',
                          created_at=DATETIME, updated_at=DATETIME,
                          owner=TENANT1, visibility='public')
        context = glance.context.RequestContext(tenant=TENANT2, user=USER1)
        fake_notifier = unit_test_utils.FakeNotifier()
        image_proxy = glance.notifier.ImageProxy(image, context,
                                                 fake_notifier)

        def data_iterator():
            fake_notifier.log = []
            yield 'abcde'
            yield 'fghij'
            insurance['called'] = True

        self.config(disabled_notifications=['image.activate', 'image.upload'])
        image_proxy.set_data(data_iterator(), 10)
        self.assertTrue(insurance['called'])
        output_logs = fake_notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_set_data_storage_full(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise glance_store.StorageFull(message='Modern Major General')

        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Modern Major General', output_log['payload'])

    def test_image_set_data_value_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise ValueError('value wrong')

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('value wrong', output_log['payload'])

    def test_image_set_data_duplicate(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.Duplicate('Cant have duplicates')

        self.assertRaises(webob.exc.HTTPConflict,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Cant have duplicates', output_log['payload'])

    def test_image_set_data_storage_write_denied(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise glance_store.StorageWriteDenied(message='The Very Model')

        self.assertRaises(webob.exc.HTTPServiceUnavailable,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('The Very Model', output_log['payload'])
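    # The error-path tests above and below pin down how set_data()
    # translates store and domain failures into HTTP errors:
    #
    #     glance_store.StorageFull        -> HTTP 413 RequestEntityTooLarge
    #     ValueError                      -> HTTP 400 BadRequest
    #     exception.Duplicate             -> HTTP 409 Conflict
    #     glance_store.StorageWriteDenied -> HTTP 503 ServiceUnavailable
    #     exception.Forbidden             -> HTTP 403 Forbidden
    #     exception.NotFound              -> HTTP 404 NotFound
    #     webob.exc.HTTPError / GlanceException are re-raised unchanged
    #
    # In every case an ERROR-level image.upload notification is emitted
    # with the failure message in the payload.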
    def test_image_set_data_forbidden(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.Forbidden('Not allowed')

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Not allowed', output_log['payload'])

    def test_image_set_data_not_found(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.NotFound('Not found')

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Not found', output_log['payload'])

    def test_image_set_data_HTTP_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise webob.exc.HTTPError('Http issue')

        self.assertRaises(webob.exc.HTTPError,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Http issue', output_log['payload'])

    def test_image_set_data_error(self):
        def data_iterator():
            self.notifier.log = []
            yield 'abcde'
            raise exception.GlanceException('Failed')

        self.assertRaises(exception.GlanceException,
                          self.image_proxy.set_data, data_iterator(), 10)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('ERROR', output_log['notification_type'])
        self.assertEqual('image.upload', output_log['event_type'])
        self.assertIn('Failed', output_log['payload'])


class TestImageMemberNotifications(utils.BaseTestCase):
    """Test Image Member Notifications work"""

    def setUp(self):
        super(TestImageMemberNotifications, self).setUp()
        self.context = glance.context.RequestContext(tenant=TENANT2,
                                                     user=USER1)
        self.notifier = unit_test_utils.FakeNotifier()
        self.image = ImageStub(
            image_id=UUID1, name='image-1', status='active', size=1024,
            created_at=DATETIME, updated_at=DATETIME, owner=TENANT1,
            visibility='public', container_format='ami',
            tags=['one', 'two'], disk_format='ami',
            min_ram=128, min_disk=10,
            checksum='ca425b88f047ce8ec45ee90e813ada91',
            locations=['http://127.0.0.1'])
        self.image_member = glance.domain.ImageMembership(
            id=1, image_id=UUID1, member_id=TENANT1,
            created_at=DATETIME, updated_at=DATETIME, status='accepted')

        self.image_member_repo_stub = ImageMemberRepoStub()
        self.image_member_repo_proxy = glance.notifier.ImageMemberRepoProxy(
            self.image_member_repo_stub, self.image,
            self.context, self.notifier)
        self.image_member_proxy = glance.notifier.ImageMemberProxy(
            self.image_member, self.context, self.notifier)

    def _assert_image_member_with_notifier(self, output_log, deleted=False):
        self.assertEqual(self.image_member.member_id,
                         output_log['payload']['member_id'])
        self.assertEqual(self.image_member.image_id,
                         output_log['payload']['image_id'])
        self.assertEqual(self.image_member.status,
                         output_log['payload']['status'])
        self.assertEqual(timeutils.isotime(self.image_member.created_at),
                         output_log['payload']['created_at'])
        self.assertEqual(timeutils.isotime(self.image_member.updated_at),
                         output_log['payload']['updated_at'])
        if deleted:
            self.assertTrue(output_log['payload']['deleted'])
            self.assertIsNotNone(output_log['payload']['deleted_at'])
        else:
            self.assertFalse(output_log['payload']['deleted'])
            self.assertIsNone(output_log['payload']['deleted_at'])
    def test_image_member_add_notification(self):
        self.image_member_repo_proxy.add(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.create', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log)

    def test_image_member_add_notification_disabled(self):
        self.config(disabled_notifications=['image.member.create'])
        self.image_member_repo_proxy.add(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_member_save_notification(self):
        self.image_member_repo_proxy.save(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.update', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log)

    def test_image_member_save_notification_disabled(self):
        self.config(disabled_notifications=['image.member.update'])
        self.image_member_repo_proxy.save(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_member_delete_notification(self):
        self.image_member_repo_proxy.remove(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('image.member.delete', output_log['event_type'])
        self._assert_image_member_with_notifier(output_log, deleted=True)

    def test_image_member_delete_notification_disabled(self):
        self.config(disabled_notifications=['image.member.delete'])
        self.image_member_repo_proxy.remove(self.image_member_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_image_member_get(self):
        image_member = self.image_member_repo_proxy.get(TENANT1)
        self.assertIsInstance(image_member, glance.notifier.ImageMemberProxy)
        self.assertEqual('image_member_from_get', image_member.repo)

    def test_image_member_list(self):
        image_members = self.image_member_repo_proxy.list()
        self.assertIsInstance(image_members[0],
                              glance.notifier.ImageMemberProxy)
        self.assertEqual('image_members_from_list', image_members[0].repo)
class TestTaskNotifications(utils.BaseTestCase):
    """Test Task Notifications work"""

    def setUp(self):
        super(TestTaskNotifications, self).setUp()
        task_input = {"loc": "fake"}
        self.task_stub = TaskStub(
            task_id='aaa',
            task_type='import',
            status='pending',
            owner=TENANT2,
            expires_at=None,
            created_at=DATETIME,
            updated_at=DATETIME,
            image_id='fake_image_id',
            user_id='fake_user',
            request_id='fake_request_id',
        )
        self.task = Task(
            task_id='aaa',
            task_type='import',
            status='pending',
            owner=TENANT2,
            expires_at=None,
            created_at=DATETIME,
            updated_at=DATETIME,
            task_input=task_input,
            result='res',
            message='blah',
            image_id='fake_image_id',
            user_id='fake_user',
            request_id='fake_request_id',
        )
        self.context = glance.context.RequestContext(
            tenant=TENANT2,
            user=USER1
        )
        self.task_repo_stub = TaskRepoStub()
        self.notifier = unit_test_utils.FakeNotifier()
        self.task_repo_proxy = glance.notifier.TaskRepoProxy(
            self.task_repo_stub,
            self.context,
            self.notifier
        )
        self.task_proxy = glance.notifier.TaskProxy(
            self.task,
            self.context,
            self.notifier
        )
        self.task_stub_proxy = glance.notifier.TaskStubProxy(
            self.task_stub,
            self.context,
            self.notifier
        )
        self.patcher = mock.patch.object(timeutils, 'utcnow')
        mock_utcnow = self.patcher.start()
        mock_utcnow.return_value = datetime.datetime.utcnow()

    def tearDown(self):
        super(TestTaskNotifications, self).tearDown()
        self.patcher.stop()

    def test_task_create_notification(self):
        self.task_repo_proxy.add(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.create', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        self.assertEqual(
            timeutils.isotime(self.task.updated_at),
            output_log['payload']['updated_at']
        )
        self.assertEqual(
            timeutils.isotime(self.task.created_at),
            output_log['payload']['created_at']
        )
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertNotIn('image_id', output_log['payload'])
        self.assertNotIn('user_id', output_log['payload'])
        self.assertNotIn('request_id', output_log['payload'])

    def test_task_create_notification_disabled(self):
        self.config(disabled_notifications=['task.create'])
        self.task_repo_proxy.add(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_task_delete_notification(self):
        now = timeutils.isotime()
        self.task_repo_proxy.remove(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.delete', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        self.assertEqual(
            timeutils.isotime(self.task.updated_at),
            output_log['payload']['updated_at']
        )
        self.assertEqual(
            timeutils.isotime(self.task.created_at),
            output_log['payload']['created_at']
        )
        self.assertEqual(
            now,
            output_log['payload']['deleted_at']
        )
        if 'location' in output_log['payload']:
            self.fail('Notification contained location field.')
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertNotIn('image_id', output_log['payload'])
        self.assertNotIn('user_id', output_log['payload'])
        self.assertNotIn('request_id', output_log['payload'])

    def test_task_delete_notification_disabled(self):
        self.config(disabled_notifications=['task.delete'])
        self.task_repo_proxy.remove(self.task_stub_proxy)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_task_run_notification(self):
        with mock.patch('glance.async_.TaskExecutor') as mock_executor:
            executor = mock_executor.return_value
            executor._run.return_value = mock.Mock()
            self.task_proxy.run(executor=mock_executor)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.run', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet; check the key
        # names rather than the attribute values so the assertion cannot
        # pass vacuously.
        self.assertNotIn('image_id', output_log['payload'])
        self.assertNotIn('user_id', output_log['payload'])
        self.assertNotIn('request_id', output_log['payload'])
    def test_task_run_notification_disabled(self):
        self.config(disabled_notifications=['task.run'])
        with mock.patch('glance.async_.TaskExecutor') as mock_executor:
            executor = mock_executor.return_value
            executor._run.return_value = mock.Mock()
            self.task_proxy.run(executor=mock_executor)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_task_processing_notification(self):
        self.task_proxy.begin_processing()
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.processing', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertNotIn('image_id', output_log['payload'])
        self.assertNotIn('user_id', output_log['payload'])
        self.assertNotIn('request_id', output_log['payload'])

    def test_task_processing_notification_disabled(self):
        self.config(disabled_notifications=['task.processing'])
        self.task_proxy.begin_processing()
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_task_success_notification(self):
        self.task_proxy.begin_processing()
        self.task_proxy.succeed(result=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(2, len(output_logs))
        output_log = output_logs[1]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.success', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertNotIn('image_id', output_log['payload'])
        self.assertNotIn('user_id', output_log['payload'])
        self.assertNotIn('request_id', output_log['payload'])

    def test_task_success_notification_disabled(self):
        self.config(disabled_notifications=['task.processing',
                                            'task.success'])
        self.task_proxy.begin_processing()
        self.task_proxy.succeed(result=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))

    def test_task_failure_notification(self):
        self.task_proxy.fail(message=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(1, len(output_logs))
        output_log = output_logs[0]
        self.assertEqual('INFO', output_log['notification_type'])
        self.assertEqual('task.failure', output_log['event_type'])
        self.assertEqual(self.task.task_id, output_log['payload']['id'])
        # Verify newly added fields 'image_id', 'user_id' and
        # 'request_id' are not part of notification yet
        self.assertNotIn('image_id', output_log['payload'])
        self.assertNotIn('user_id', output_log['payload'])
        self.assertNotIn('request_id', output_log['payload'])

    def test_task_failure_notification_disabled(self):
        self.config(disabled_notifications=['task.failure'])
        self.task_proxy.fail(message=None)
        output_logs = self.notifier.get_logs()
        self.assertEqual(0, len(output_logs))
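# Taken together, the task tests cover the full event vocabulary:
# task.create, task.delete, task.run, task.processing, task.success and
# task.failure, each individually suppressible via disabled_notifications.
# If group names are honoured as the option help describes, a single
# entry can disable the whole family (illustrative sketch):
#
#     [DEFAULT]
#     disabled_notifications = task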
glance-29.0.0/glance/tests/unit/test_policy.py

# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import abc
from unittest import mock
import hashlib
import os.path

import oslo_config.cfg
from oslo_policy import policy as common_policy

import glance.api.policy
from glance.common import exception
import glance.context
from glance.policies import base as base_policy
from glance.tests.unit import base


UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'


class IterableMock(mock.Mock, abc.Iterable):

    def __iter__(self):
        while False:
            yield None


class ImageRepoStub(object):

    def __init__(self):
        self.db_api = mock.Mock()
        self.db_api.image_member_find.return_value = [
            {'member': 'foo'}
        ]

    def get(self, *args, **kwargs):
        context = mock.Mock()
        policy = mock.Mock()
        return glance.api.policy.ImageProxy(
            ImageStub(image_id=UUID1), context, policy
        )

    def save(self, *args, **kwargs):
        return 'image_from_save'

    def add(self, *args, **kwargs):
        return 'image_from_add'

    def list(self, *args, **kwargs):
        return ['image_from_list_0', 'image_from_list_1']


class ImageStub(object):

    def __init__(self, image_id=None, visibility='private',
                 container_format='bear', disk_format='raw',
                 status='active', extra_properties=None,
                 os_hidden=False):
        if extra_properties is None:
            extra_properties = {}
        self.image_id = image_id
        self.visibility = visibility
        self.container_format = container_format
        self.disk_format = disk_format
        self.status = status
        self.extra_properties = extra_properties
        self.checksum = 'c2e5db72bd7fd153f53ede5da5a06de3'
        self.os_hash_algo = 'sha512'
        self.os_hash_value = hashlib.sha512(b'glance').hexdigest()
        self.created_at = '2013-09-28T15:27:36Z'
        self.updated_at = '2013-09-28T15:27:37Z'
        self.locations = []
        self.min_disk = 0
        self.min_ram = 0
        self.name = 'image_name'
        self.owner = 'tenant1'
        self.protected = False
        self.size = 0
        self.virtual_size = 0
        self.tags = []
        self.os_hidden = os_hidden
        self.member = self.owner

    def delete(self):
        self.status = 'deleted'


class ImageFactoryStub(object):

    def new_image(self, image_id=None, name=None, visibility='private',
                  min_disk=0, min_ram=0, protected=False, owner=None,
                  disk_format=None, container_format=None,
                  extra_properties=None, hidden=False, tags=None,
                  **other_args):
        self.visibility = visibility
        self.hidden = hidden
        return 'new_image'


class MemberRepoStub(object):
    image = None

    def add(self, image_member):
        image_member.output = 'member_repo_add'

    def get(self, *args, **kwargs):
        return 'member_repo_get'

    def save(self, image_member, from_state=None):
        image_member.output = 'member_repo_save'

    def list(self, *args, **kwargs):
        return 'member_repo_list'

    def remove(self, image_member):
        image_member.output = 'member_repo_remove'


class ImageMembershipStub(object):

    def __init__(self, output=None):
        self.output = output


class TaskRepoStub(object):

    def get(self, *args, **kwargs):
        return 'task_from_get'

    def add(self, *args, **kwargs):
        return 'task_from_add'

    def list(self, *args, **kwargs):
        return ['task_from_list_0', 'task_from_list_1']


class TaskStub(object):

    def __init__(self, task_id):
        self.task_id = task_id
        self.status = 'pending'

    def run(self, executor):
        self.status = 'processing'


class TaskFactoryStub(object):

    def new_task(self, *args):
        return 'new_task'


class MdNamespaceRepoStub(object):
    def add(self, namespace):
        return 'mdns_add'

    def get(self, namespace):
        return 'mdns_get'

    def list(self, *args, **kwargs):
        return ['mdns_list']

    def save(self, namespace):
        return 'mdns_save'

    def remove(self, namespace):
        return 'mdns_remove'

    def remove_tags(self, namespace):
        return 'mdtags_remove'
class MdObjectRepoStub(object):
    def add(self, obj):
        return 'mdobj_add'

    def get(self, ns, obj_name):
        return 'mdobj_get'

    def list(self, *args, **kwargs):
        return ['mdobj_list']

    def save(self, obj):
        return 'mdobj_save'

    def remove(self, obj):
        return 'mdobj_remove'


class MdResourceTypeRepoStub(object):
    def add(self, rt):
        return 'mdrt_add'

    def get(self, *args, **kwargs):
        return 'mdrt_get'

    def list(self, *args, **kwargs):
        return ['mdrt_list']

    def remove(self, *args, **kwargs):
        return 'mdrt_remove'


class MdPropertyRepoStub(object):
    def add(self, prop):
        return 'mdprop_add'

    def get(self, ns, prop_name):
        return 'mdprop_get'

    def list(self, *args, **kwargs):
        return ['mdprop_list']

    def save(self, prop):
        return 'mdprop_save'

    def remove(self, prop):
        return 'mdprop_remove'


class MdTagRepoStub(object):
    def add(self, tag):
        return 'mdtag_add'

    def add_tags(self, tags, can_append=False):
        return ['mdtag_add_tags']

    def get(self, ns, tag_name):
        return 'mdtag_get'

    def list(self, *args, **kwargs):
        return ['mdtag_list']

    def save(self, tag):
        return 'mdtag_save'

    def remove(self, tag):
        return 'mdtag_remove'


class TestPolicyEnforcer(base.IsolatedUnitTest):

    def test_policy_enforce_unregistered(self):
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(glance.api.policy.policy.PolicyNotRegistered,
                          enforcer.enforce, context, 'wibble', {})

    def test_policy_check_unregistered(self):
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(glance.api.policy.policy.PolicyNotRegistered,
                          enforcer.check, context, 'wibble', {})

    def test_policy_file_default_rules_default_location(self):
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=['reader'])
        enforcer.enforce(context, 'get_image',
                         {'project_id': context.project_id})

    def test_policy_file_custom_rules_default_location(self):
        rules = {"get_image": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'get_image', {})

    def test_policy_file_custom_location(self):
        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
                    group='oslo_policy')
        rules = {"get_image": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'get_image', {})

    def test_policy_file_check(self):
        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
                    group='oslo_policy')
        rules = {"get_image": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertEqual(False, enforcer.check(context, 'get_image', {}))

    def test_policy_file_get_image_default_everybody(self):
        rules = {"default": '', "get_image": ''}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertEqual(True, enforcer.check(context, 'get_image', {}))
    def test_policy_file_get_image_default_nobody(self):
        rules = {"default": '!'}
        self.set_policy_rules(rules)
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context, 'get_image', {})

    def _test_enforce_scope(self):
        policy_name = 'foo'
        rule = common_policy.RuleDefault(
            name=policy_name, check_str='role:bar', scope_types=['system'])

        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        enforcer.register_default(rule)

        context = glance.context.RequestContext(
            user_id='user', project_id='project', roles=['bar'])
        target = {}
        return enforcer.enforce(context, policy_name, target)

    def test_policy_enforcer_raises_forbidden_when_enforcing_scope(self):
        # Make sure we raise an exception if the context scope doesn't match
        # the scope of the rule when oslo.policy is configured to raise an
        # exception.
        self.config(enforce_scope=True, group='oslo_policy')
        self.assertRaises(exception.Forbidden, self._test_enforce_scope)

    def test_policy_enforcer_does_not_raise_forbidden(self):
        # Make sure we don't raise an exception for mismatched scopes unless
        # oslo.policy is configured to do so.
        self.config(enforce_scope=False, group='oslo_policy')
        self.assertTrue(self._test_enforce_scope())
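    # enforce_scope is plain oslo.policy configuration; the two tests
    # above correspond to toggling it in glance-api.conf (sketch):
    #
    #     [oslo_policy]
    #     enforce_scope = true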
    def test_ensure_context_object_is_passed_to_policy_enforcement(self):
        # The oslo.policy Enforcer does some useful translation for us if we
        # pass it an oslo.context.RequestContext object. This prevents us from
        # having to handle the translation to a valid credential dictionary in
        # glance.
        context = glance.context.RequestContext()

        mock_enforcer = self.mock_object(common_policy.Enforcer, 'enforce')

        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)
        enforcer.register_default(
            common_policy.RuleDefault(name='foo', check_str='role:bar')
        )

        enforcer.enforce(context, 'foo', {})
        mock_enforcer.assert_called_once_with('foo', {}, context,
                                              do_raise=True,
                                              exc=exception.Forbidden,
                                              action='foo')

        # Reset the mock and make sure glance.api.policy.Enforcer.check()
        # behaves the same way.
        mock_enforcer.reset_mock()
        enforcer.check(context, 'foo', {})
        mock_enforcer.assert_called_once_with('foo', {}, context)


class TestPolicyEnforcerNoFile(base.IsolatedUnitTest):

    def test_policy_file_specified_but_not_found(self):
        """Missing defined policy file should result in a default ruleset"""
        self.config(policy_file='gobble.gobble', group='oslo_policy')
        self.config(enforce_new_defaults=True, group='oslo_policy')
        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)

        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context,
                          'manage_image_cache', {})

        admin_context = glance.context.RequestContext(roles=['admin'])
        enforcer.enforce(admin_context, 'manage_image_cache', {})

    def test_policy_file_default_not_found(self):
        """Missing default policy file should result in a default ruleset"""
        self.config(enforce_new_defaults=True, group='oslo_policy')

        def fake_find_file(self, name):
            return None

        self.mock_object(oslo_config.cfg.ConfigOpts, 'find_file',
                         fake_find_file)

        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)

        context = glance.context.RequestContext(roles=[])
        self.assertRaises(exception.Forbidden,
                          enforcer.enforce, context,
                          'manage_image_cache', {})

        admin_context = glance.context.RequestContext(roles=['admin'])
        enforcer.enforce(admin_context, 'manage_image_cache', {})


class TestContextPolicyEnforcer(base.IsolatedUnitTest):

    def _do_test_policy_influence_context_admin(self,
                                                policy_admin_role,
                                                context_role,
                                                context_is_admin,
                                                admin_expected):
        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
                    group='oslo_policy')
        rules = {'context_is_admin': 'role:%s' % policy_admin_role}
        self.set_policy_rules(rules)

        enforcer = glance.api.policy.Enforcer(
            suppress_deprecation_warnings=True)

        context = glance.context.RequestContext(roles=[context_role],
                                                is_admin=context_is_admin,
                                                policy_enforcer=enforcer)
        self.assertEqual(admin_expected, context.is_admin)

    def test_context_admin_policy_admin(self):
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'test_admin',
                                                     True, True)

    def test_context_nonadmin_policy_admin(self):
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'test_admin',
                                                     False, True)

    def test_context_admin_policy_nonadmin(self):
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'demo',
                                                     True, True)

    def test_context_nonadmin_policy_nonadmin(self):
        self._do_test_policy_influence_context_admin('test_admin',
                                                     'demo',
                                                     False, False)


class TestDefaultPolicyCheckStrings(base.IsolatedUnitTest):

    def test_project_member_check_string(self):
        expected = 'role:member and project_id:%(project_id)s'
        self.assertEqual(expected, base_policy.PROJECT_MEMBER)

    def test_admin_or_project_member_check_string(self):
        expected = ('rule:context_is_admin or '
                    '(role:member and project_id:%(project_id)s)')
        self.assertEqual(expected, base_policy.ADMIN_OR_PROJECT_MEMBER)
    def test_project_member_download_image_check_string(self):
        expected = (
            "role:member and (project_id:%(project_id)s or "
            "project_id:%(member_id)s or 'community':%(visibility)s or "
            "'public':%(visibility)s or 'shared':%(visibility)s)"
        )
        self.assertEqual(
            expected,
            base_policy.
            PROJECT_MEMBER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED
        )

    def test_project_reader_check_string(self):
        expected = 'role:reader and project_id:%(project_id)s'
        self.assertEqual(expected, base_policy.PROJECT_READER)

    def test_admin_or_project_reader_check_string(self):
        expected = ('rule:context_is_admin or '
                    '(role:reader and project_id:%(project_id)s)')
        self.assertEqual(expected, base_policy.ADMIN_OR_PROJECT_READER)

    def test_project_reader_get_image_check_string(self):
        expected = (
            "role:reader and (project_id:%(project_id)s or "
            "project_id:%(member_id)s or \'community\':%(visibility)s or "
            "'public':%(visibility)s or 'shared':%(visibility)s)"
        )
        self.assertEqual(
            expected,
            base_policy.
            PROJECT_READER_OR_IMAGE_MEMBER_OR_COMMUNITY_OR_PUBLIC_OR_SHARED
        )

    def test_service_or_member_check_string(self):
        expected = (
            'rule:service_api or (role:member and project_id:%(project_id)s'
            ' and project_id:%(owner)s)'
        )
        self.assertEqual(expected, base_policy.SERVICE_OR_PROJECT_MEMBER)

    def test_service_check_string(self):
        expected = (
            'rule:service_api'
        )
        self.assertEqual(expected, base_policy.SERVICE)


class TestImageTarget(base.IsolatedUnitTest):

    def test_image_target_ignores_locations(self):
        image = ImageStub()
        target = glance.api.policy.ImageTarget(image)
        self.assertNotIn('locations', list(target))

    def test_image_target_project_id_alias(self):
        image = ImageStub()
        target = glance.api.policy.ImageTarget(image)
        self.assertIn('project_id', target)
        self.assertEqual(image.owner, target['project_id'])
        self.assertEqual(image.owner, target['owner'])

    def test_image_target_transforms(self):
        fake_image = mock.MagicMock()
        fake_image.image_id = mock.sentinel.image_id
        fake_image.owner = mock.sentinel.owner
        fake_image.member = mock.sentinel.member
        target = glance.api.policy.ImageTarget(fake_image)

        # Make sure the key transforms work
        self.assertEqual(mock.sentinel.image_id, target['id'])
        self.assertEqual(mock.sentinel.owner, target['project_id'])
        self.assertEqual(mock.sentinel.member, target['member_id'])

        # Also make sure the base properties still work
        self.assertEqual(mock.sentinel.image_id, target['image_id'])
        self.assertEqual(mock.sentinel.owner, target['owner'])
        self.assertEqual(mock.sentinel.member, target['member'])
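# The check strings asserted above are evaluated by oslo.policy against a
# target dict (substituted into the %(...)s placeholders) and the
# caller's credentials. A minimal standalone sketch of that evaluation,
# with an illustrative rule name and values:
#
#     from oslo_config import cfg
#     from oslo_policy import policy as common_policy
#
#     enforcer = common_policy.Enforcer(cfg.CONF)
#     enforcer.register_default(common_policy.RuleDefault(
#         'demo', 'role:member and project_id:%(project_id)s'))
#     allowed = enforcer.enforce(
#         'demo', {'project_id': 'p1'},
#         {'roles': ['member'], 'project_id': 'p1'})  # -> True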
glance-29.0.0/glance/tests/unit/test_quota.py

# Copyright 2013, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import fixtures
from unittest import mock
from unittest.mock import patch
import uuid

from oslo_limit import exception as ol_exc
from oslo_utils import encodeutils
from oslo_utils import units

from glance.common import exception
from glance.common import store_utils
import glance.quota
from glance.quota import keystone as ks_quota
from glance.tests.unit import fixtures as glance_fixtures
from glance.tests.unit import utils as unit_test_utils
from glance.tests import utils as test_utils

UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'


class FakeContext(object):
    owner = 'someone'
    is_admin = False


class FakeImage(object):
    size = None
    image_id = 'someid'
    locations = [{'url': 'file:///not/a/path', 'metadata': {}}]
    tags = set([])

    def set_data(self, data, size=None, backend=None, set_active=True):
        self.size = 0
        for d in data:
            self.size += len(d)

    def __init__(self, **kwargs):
        self.extra_properties = kwargs.get('extra_properties', {})


class TestImageQuota(test_utils.BaseTestCase):
    def setUp(self):
        super(TestImageQuota, self).setUp()

    def _get_image(self, location_count=1, image_size=10):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'xyz'
        base_image.size = image_size
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        locations = []
        for i in range(location_count):
            locations.append({'url': 'file:///g/there/it/is%d' % i,
                              'metadata': {}, 'status': 'active'})
        image_values = {'id': 'xyz', 'owner': context.owner,
                        'status': 'active', 'size': image_size,
                        'locations': locations}
        db_api.image_create(context, image_values)
        return image

    def test_quota_allowed(self):
        quota = 10
        self.config(user_storage_quota=str(quota))
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        data = '*' * quota
        base_image.set_data(data, size=None)
        image.set_data(data)
        self.assertEqual(quota, base_image.size)

    def _test_quota_allowed_unit(self, data_length, config_quota):
        self.config(user_storage_quota=config_quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        data = '*' * data_length
        base_image.set_data(data, size=None)
        image.set_data(data)
        self.assertEqual(data_length, base_image.size)

    def test_quota_allowed_unit_b(self):
        self._test_quota_allowed_unit(10, '10B')

    def test_quota_allowed_unit_kb(self):
        self._test_quota_allowed_unit(10, '1KB')

    def test_quota_allowed_unit_mb(self):
        self._test_quota_allowed_unit(10, '1MB')

    def test_quota_allowed_unit_gb(self):
        self._test_quota_allowed_unit(10, '1GB')

    def test_quota_allowed_unit_tb(self):
        self._test_quota_allowed_unit(10, '1TB')
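    # As the unit tests above demonstrate, user_storage_quota accepts a
    # bare byte count or a B/KB/MB/GB/TB suffix; e.g. in glance-api.conf
    # (value illustrative):
    #
    #     [DEFAULT]
    #     user_storage_quota = 1GB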
    def _quota_exceeded_size(self, quota, data,
                             deleted=True, size=None):
        self.config(user_storage_quota=quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)

        if deleted:
            with patch.object(store_utils, 'safe_delete_from_backend'):
                store_utils.safe_delete_from_backend(
                    context,
                    image.image_id,
                    base_image.locations[0])

        self.assertRaises(exception.StorageQuotaFull,
                          image.set_data,
                          data,
                          size=size)

    def test_quota_exceeded_no_size(self):
        quota = 10
        data = '*' * (quota + 1)
        # NOTE(jbresnah) When the image size is None it means that it is
        # not known. In this case the only time we will raise an
        # exception is when there is no room left at all, thus we know
        # it will not fit.
        # That's why 'get_remaining_quota' is mocked with return_value = 0.
        with patch.object(glance.api.common, 'get_remaining_quota',
                          return_value=0):
            self._quota_exceeded_size(str(quota), data)

    def test_quota_exceeded_with_right_size(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, size=len(data),
                                  deleted=False)

    def test_quota_exceeded_with_right_size_b(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size('10B', data, size=len(data),
                                  deleted=False)

    def test_quota_exceeded_with_right_size_kb(self):
        quota = units.Ki
        data = '*' * (quota + 1)
        self._quota_exceeded_size('1KB', data, size=len(data),
                                  deleted=False)

    def test_quota_exceeded_with_lie_size(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, deleted=False,
                                  size=quota - 1)

    def test_quota_exceeded_keystone_quotas(self):
        # Set our global limit to a tiny ten bytes
        self.config(user_storage_quota='10B')
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)

        # With keystone quotas disabled, a 100 byte image should fail the
        # global limit.
        data = '*' * 100
        self.assertRaises(exception.StorageQuotaFull,
                          image.set_data,
                          data,
                          size=len(data))

        # If we turn on keystone quotas, the global limit gets ignored
        # so the same image no longer fails.
        self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit')
        self.config(use_keystone_limits=True)
        image.set_data(data, size=len(data))

    def test_append_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.append(new_location)
        pre_add_locations.append(new_location)
        self.assertEqual(image.locations, pre_add_locations)

    def test_insert_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.insert(0, new_location)
        pre_add_locations.insert(0, new_location)
        self.assertEqual(image.locations, pre_add_locations)

    def test_extend_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.extend([new_location])
        pre_add_locations.extend([new_location])
        self.assertEqual(image.locations, pre_add_locations)

    def test_iadd_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations += [new_location]
        pre_add_locations += [new_location]
        self.assertEqual(image.locations, pre_add_locations)

    def test_set_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        image.locations = [new_location]
        self.assertEqual(image.locations, [new_location])

    def _make_image_with_quota(self, image_size=10, location_count=2):
        quota = image_size * location_count
        self.config(user_storage_quota=str(quota))
        return self._get_image(image_size=image_size,
                               location_count=location_count)

    def test_exceed_append_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.append,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})

    def test_exceed_insert_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.insert,
                          0,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})

    def test_exceed_extend_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.extend,
                          [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}])

    def test_set_location_under(self):
        image = self._make_image_with_quota(location_count=1)
        image.locations = [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}]

    def test_set_location_exceed(self):
        image = self._make_image_with_quota(location_count=1)
        try:
            image.locations = [{'url': 'file:///a/path', 'metadata': {},
                                'status': 'active'},
                               {'url': 'file:///a/path2', 'metadata': {},
                                'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass

    def test_iadd_location_exceed(self):
        image = self._make_image_with_quota(location_count=1)
        try:
            image.locations += [{'url': 'file:///a/path', 'metadata': {},
                                 'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass
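    # Note the pattern the location tests rely on: image.locations on the
    # quota ImageProxy is itself a proxying list, so every mutation path
    # (append, insert, extend, +=, and wholesale assignment) is
    # intercepted and checked against the storage quota before it reaches
    # the base image.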
    def test_append_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations.append({'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)

    def test_insert_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations.insert(0, {'url': 'file:///fake.img.tar.gz',
                                   'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)

    def test_set_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations = [{'url': 'file:///fake.img.tar.gz',
                            'metadata': {}}]
        self.assertEqual([{'url': 'file:///fake.img.tar.gz',
                           'metadata': {}}],
                         image.locations)

    def test_iadd_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)

        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations += [{'url': 'file:///fake.img.tar.gz',
                             'metadata': {}}]
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)


class TestImagePropertyQuotas(test_utils.BaseTestCase):
    def setUp(self):
        super(TestImagePropertyQuotas, self).setUp()
        self.base_image = FakeImage()
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

        self.image_repo_mock = mock.Mock()
        self.image_repo_mock.add.return_value = self.base_image
        self.image_repo_mock.save.return_value = self.base_image

        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())

    def test_save_image_with_image_property(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)

    def test_save_image_too_many_image_properties(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
                                self.image_repo_proxy.save, self.image)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))

    def test_save_image_unlimited_image_properties(self):
        self.config(image_property_quota=-1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
    def test_add_image_with_image_property(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.add(self.image)
        self.image_repo_mock.add.assert_called_once_with(self.base_image)

    def test_add_image_too_many_image_properties(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
                                self.image_repo_proxy.add, self.image)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))

    def test_add_image_unlimited_image_properties(self):
        self.config(image_property_quota=-1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.add(self.image)
        self.image_repo_mock.add.assert_called_once_with(self.base_image)

    def _quota_exceed_setup(self):
        self.config(image_property_quota=2)
        self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

    def test_modify_image_properties_when_quota_exceeded(self):
        self._quota_exceed_setup()
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertEqual('frob', self.base_image.extra_properties['foo'])
        self.assertEqual('eggs', self.base_image.extra_properties['spam'])

    def test_delete_image_properties_when_quota_exceeded(self):
        self._quota_exceed_setup()
        self.config(image_property_quota=1)
        del self.image.extra_properties['foo']
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertNotIn('foo', self.base_image.extra_properties)
        self.assertEqual('ham', self.base_image.extra_properties['spam'])

    def test_invalid_quota_config_parameter(self):
        self.config(user_storage_quota='foo')
        location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
        self.assertRaises(exception.InvalidOptionValue,
                          self.image.locations.append, location)

    def test_exceed_quota_during_patch_operation(self):
        self._quota_exceed_setup()
        self.image.extra_properties['frob'] = 'baz'
        self.image.extra_properties['lorem'] = 'ipsum'
        self.assertEqual('bar', self.base_image.extra_properties['foo'])
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertEqual('baz', self.base_image.extra_properties['frob'])
        self.assertEqual('ipsum', self.base_image.extra_properties['lorem'])

        del self.image.extra_properties['frob']
        del self.image.extra_properties['lorem']
        self.image_repo_proxy.save(self.image)
        call_args = mock.call(self.base_image, from_state=None)
        self.assertEqual(call_args, self.image_repo_mock.save.call_args)
        self.assertEqual('bar', self.base_image.extra_properties['foo'])
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertNotIn('frob', self.base_image.extra_properties)
        self.assertNotIn('lorem', self.base_image.extra_properties)
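    # image_property_quota follows the convention the tests above
    # establish for all of these quotas: a non-negative integer caps the
    # count, while -1 disables the check; e.g. in glance-api.conf (value
    # illustrative):
    #
    #     [DEFAULT]
    #     image_property_quota = 128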
    def test_quota_exceeded_after_delete_image_properties(self):
        self.config(image_property_quota=3)
        self.base_image.extra_properties = {'foo': 'bar',
                                            'spam': 'ham',
                                            'frob': 'baz'}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())
        self.config(image_property_quota=1)
        del self.image.extra_properties['foo']
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertNotIn('foo', self.base_image.extra_properties)
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertEqual('baz', self.base_image.extra_properties['frob'])


class TestImageTagQuotas(test_utils.BaseTestCase):
    def setUp(self):
        super(TestImageTagQuotas, self).setUp()
        self.base_image = mock.Mock()
        self.base_image.tags = set([])
        self.base_image.extra_properties = {}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())

        self.image_repo_mock = mock.Mock()
        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())

    def test_replace_image_tag(self):
        self.config(image_tag_quota=1)
        self.image.tags = ['foo']
        self.assertEqual(1, len(self.image.tags))

    def test_replace_too_many_image_tags(self):
        self.config(image_tag_quota=0)

        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                setattr, self.image, 'tags', ['foo', 'bar'])
        self.assertIn('Attempted: 2, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))
        self.assertEqual(0, len(self.image.tags))

    def test_replace_unlimited_image_tags(self):
        self.config(image_tag_quota=-1)
        self.image.tags = ['foo']
        self.assertEqual(1, len(self.image.tags))

    def test_add_image_tag(self):
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))

    def test_add_too_many_image_tags(self):
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')

        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                self.image.tags.add, 'bar')
        self.assertIn('Attempted: 2, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))

    def test_add_unlimited_image_tags(self):
        self.config(image_tag_quota=-1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))

    def test_remove_image_tag_while_over_quota(self):
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))
        self.config(image_tag_quota=0)
        self.image.tags.remove('foo')
        self.assertEqual(0, len(self.image.tags))


class TestQuotaImageTagsProxy(test_utils.BaseTestCase):
    def setUp(self):
        super(TestQuotaImageTagsProxy, self).setUp()

    def test_add(self):
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        proxy.add('foo')
        self.assertIn('foo', proxy)

    def test_add_too_many_tags(self):
        self.config(image_tag_quota=0)
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                proxy.add, 'bar')
        self.assertIn('Attempted: 1, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))

    def test_equals(self):
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        self.assertEqual(set([]), proxy)

    def test_not_equals(self):
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        self.assertNotEqual('foo', proxy)

    def test_contains(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['foo']))
        self.assertIn('foo', proxy)

    def test_len(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['foo',
                                                      'bar',
                                                      'baz',
                                                      'niz']))
        self.assertEqual(4, len(proxy))

    def test_iter(self):
        items = set(['foo', 'bar', 'baz', 'niz'])
        proxy = glance.quota.QuotaImageTagsProxy(items.copy())
        self.assertEqual(4, len(items))
        for item in proxy:
            items.remove(item)
        self.assertEqual(0, len(items))

    def test_tags_attr_no_loop(self):
        proxy = glance.quota.QuotaImageTagsProxy(None)
        self.assertEqual(set([]), proxy.tags)

    def test_tags_deepcopy(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['a', 'b']))
        proxy_copy = copy.deepcopy(proxy)
        self.assertEqual(set(['a', 'b']), proxy_copy.tags)
        self.assertIn('a', proxy_copy)
        # remove is found via __getattr__
        proxy_copy.remove('a')
        self.assertNotIn('a', proxy_copy)
glance.quota.QuotaImageTagsProxy(set(['a', 'b'])) self.assertEqual(set(['a', 'b']), proxy.tags) del proxy.tags self.assertIsNone(proxy.tags) class TestImageMemberQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageMemberQuotas, self).setUp() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) context = FakeContext() self.image = mock.Mock() self.base_image_member_factory = mock.Mock() self.image_member_factory = glance.quota.ImageMemberFactoryProxy( self.base_image_member_factory, context, db_api, store) def test_new_image_member(self): self.config(image_member_quota=1) self.image_member_factory.new_image_member(self.image, 'fake_id') nim = self.base_image_member_factory.new_image_member nim.assert_called_once_with(self.image, 'fake_id') def test_new_image_member_unlimited_members(self): self.config(image_member_quota=-1) self.image_member_factory.new_image_member(self.image, 'fake_id') nim = self.base_image_member_factory.new_image_member nim.assert_called_once_with(self.image, 'fake_id') def test_new_image_member_too_many_members(self): self.config(image_member_quota=0) self.assertRaises(exception.ImageMemberLimitExceeded, self.image_member_factory.new_image_member, self.image, 'fake_id') class TestImageLocationQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageLocationQuotas, self).setUp() self.base_image = mock.Mock() self.base_image.locations = [] self.base_image.size = 1 self.base_image.extra_properties = {} self.image = glance.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_proxy = glance.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_replace_image_location(self): self.config(image_location_quota=1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {} }] self.assertEqual(1, len(self.image.locations)) def test_replace_too_many_image_locations(self): self.config(image_location_quota=1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {}} ] locations = [ {"url": "file:///fake1.img.tar.gz", "metadata": {}}, {"url": "file:///fake2.img.tar.gz", "metadata": {}}, {"url": "file:///fake3.img.tar.gz", "metadata": {}} ] exc = self.assertRaises(exception.ImageLocationLimitExceeded, setattr, self.image, 'locations', locations) self.assertIn('Attempted: 3, Maximum: 1', encodeutils.exception_to_unicode(exc)) self.assertEqual(1, len(self.image.locations)) def test_replace_unlimited_image_locations(self): self.config(image_location_quota=-1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {}} ] self.assertEqual(1, len(self.image.locations)) def test_add_image_location(self): self.config(image_location_quota=1) location = {"url": "file:///fake.img.tar.gz", "metadata": {}} self.image.locations.append(location) self.assertEqual(1, len(self.image.locations)) def test_add_too_many_image_locations(self): self.config(image_location_quota=1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}} exc = self.assertRaises(exception.ImageLocationLimitExceeded, self.image.locations.append, location2) self.assertIn('Attempted: 2, Maximum: 1', encodeutils.exception_to_unicode(exc)) def test_add_unlimited_image_locations(self): self.config(image_location_quota=-1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": 
{}} self.image.locations.append(location1) self.assertEqual(1, len(self.image.locations)) def test_remove_image_location_while_over_quota(self): self.config(image_location_quota=1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) self.assertEqual(1, len(self.image.locations)) self.config(image_location_quota=0) self.image.locations.remove(location1) self.assertEqual(0, len(self.image.locations)) class TestImageKeystoneQuota(test_utils.BaseTestCase): def setUp(self): super(TestImageKeystoneQuota, self).setUp() default_limits = { ks_quota.QUOTA_IMAGE_SIZE_TOTAL: 500, 'another_limit': 2, } ksqf = glance_fixtures.KeystoneQuotaFixture(**default_limits) self.useFixture(ksqf) self.db_api = unit_test_utils.FakeDB() self.useFixture(fixtures.MockPatch('glance.quota.keystone.db', self.db_api)) def _create_fake_image(self, context, size): location_count = 2 locations = [] for i in range(location_count): locations.append({'url': 'file:///g/there/it/is%d' % i, 'status': 'active', 'metadata': {}}) image_values = {'id': str(uuid.uuid4()), 'owner': context.owner, 'status': 'active', 'size': size * units.Mi, 'locations': locations} self.db_api.image_create(context, image_values) def test_enforce_overquota(self): # Check that a single large image with multiple locations will # trip the quota check. self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) context = FakeContext() self._create_fake_image(context, 300) exc = self.assertRaises(exception.LimitExceeded, ks_quota.enforce_image_size_total, context, context.owner) self.assertIn('image_size_total is over limit of 500', str(exc)) def test_enforce_overquota_with_delta(self): # Check that delta is honored, if used. self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) context = FakeContext() self._create_fake_image(context, 200) ks_quota.enforce_image_size_total(context, context.owner) ks_quota.enforce_image_size_total(context, context.owner, delta=50) self.assertRaises(exception.LimitExceeded, ks_quota.enforce_image_size_total, context, context.owner, delta=200) def test_enforce_overquota_disabled(self): # Just like the overquota case above, but without being enabled, # so no failure self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=False) context = FakeContext() self._create_fake_image(context, 300) # Does not raise because keystone limits are disabled ks_quota.enforce_image_size_total(context, context.owner) def test_enforce_overquota_multiple(self): # Check that multiple images with a combined amount # (2*2*150=600) over the quota will trip the quota check. 
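# NOTE: a minimal illustration (not glance code) of the arithmetic in
# the comment above: every location of an image counts the full image
# size against QUOTA_IMAGE_SIZE_TOTAL, so two 150 MiB images with two
# locations each add up to 2 * 2 * 150 = 600 MiB, over the 500 MiB
# limit configured by the KeystoneQuotaFixture. The helper name is
# hypothetical.
def _sketch_image_size_total(images):
    # images: iterable of (size_in_mib, location_count) pairs
    return sum(size * locations for size, locations in images)

assert _sketch_image_size_total([(150, 2), (150, 2)]) == 600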
self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) context = FakeContext() self._create_fake_image(context, 150) self._create_fake_image(context, 150) exc = self.assertRaises(exception.LimitExceeded, ks_quota.enforce_image_size_total, context, context.owner) self.assertIn('image_size_total is over limit of 500', str(exc)) def test_enforce_underquota(self): self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) context = FakeContext() self._create_fake_image(context, 100) # We are under quota, so no exception expected ks_quota.enforce_image_size_total(context, context.owner) def test_enforce_underquota_with_others_over_quota(self): self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) # Put the first tenant over quota context = FakeContext() self._create_fake_image(context, 300) self._create_fake_image(context, 300) # Create an image for another tenant that is not over quota other_context = FakeContext() other_context.owner = 'someone_else' self._create_fake_image(other_context, 100) # This tenant should pass the quota check, because it is under quota, # even though the other is over. ks_quota.enforce_image_size_total(other_context, other_context.owner) def test_enforce_multiple_limits_under_quota(self): self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) context = FakeContext() # Make sure that we can call the multi-limit handler and pass when # we are under quota. ks_quota._enforce_some(context, context.owner, {ks_quota.QUOTA_IMAGE_SIZE_TOTAL: lambda: 200, 'another_limit': lambda: 1}, {'another_limit': 1}) def test_enforce_multiple_limits_over_quota(self): self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) context = FakeContext() # Make sure that even if one of a multi-limit call is over # quota, we get the exception. self.assertRaises(exception.LimitExceeded, ks_quota._enforce_some, context, context.owner, {ks_quota.QUOTA_IMAGE_SIZE_TOTAL: lambda: 200, 'another_limit': lambda: 1}, {'another_limit': 5}) @mock.patch('oslo_limit.limit.Enforcer') @mock.patch.object(ks_quota, 'LOG') def test_oslo_limit_config_fail(self, mock_LOG, mock_enforcer): self.config(endpoint_id='ENDPOINT_ID', group='oslo_limit') self.config(use_keystone_limits=True) mock_enforcer.return_value.enforce.side_effect = ( ol_exc.SessionInitError('test')) context = FakeContext() self._create_fake_image(context, 100) self.assertRaises(ol_exc.SessionInitError, ks_quota.enforce_image_size_total, context, context.owner) mock_LOG.error.assert_called_once_with( 'Failed to initialize oslo_limit, likely due to ' 'incorrect or insufficient configuration: %(err)s', {'err': "Can't initialise OpenStackSDK session: test."}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_schema.py0000664000175000017500000001345100000000000021305 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common import exception import glance.schema from glance.tests import utils as test_utils class TestBasicSchema(test_utils.BaseTestCase): def setUp(self): super(TestBasicSchema, self).setUp() properties = { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, } self.schema = glance.schema.Schema('basic', properties) def test_validate_passes(self): obj = {'ham': 'no', 'eggs': 'scrambled'} self.schema.validate(obj) # No exception raised def test_validate_fails_on_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} self.assertRaises(exception.InvalidObject, self.schema.validate, obj) def test_validate_fails_on_bad_type(self): obj = {'eggs': 2} self.assertRaises(exception.InvalidObject, self.schema.validate, obj) def test_filter_strips_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} filtered = self.schema.filter(obj) expected = {'ham': 'virginia', 'eggs': 'scrambled'} self.assertEqual(expected, filtered) def test_merge_properties(self): self.schema.merge_properties({'bacon': {'type': 'string'}}) expected = set(['ham', 'eggs', 'bacon']) actual = set(self.schema.raw()['properties'].keys()) self.assertEqual(expected, actual) def test_merge_conflicting_properties(self): conflicts = {'eggs': {'type': 'integer'}} self.assertRaises(exception.SchemaLoadError, self.schema.merge_properties, conflicts) def test_merge_conflicting_but_identical_properties(self): conflicts = {'ham': {'type': 'string'}} self.schema.merge_properties(conflicts) # no exception raised expected = set(['ham', 'eggs']) actual = set(self.schema.raw()['properties'].keys()) self.assertEqual(expected, actual) def test_raw_json_schema(self): expected = { 'name': 'basic', 'properties': { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, }, 'additionalProperties': False, } self.assertEqual(expected, self.schema.raw()) class TestBasicSchemaLinks(test_utils.BaseTestCase): def setUp(self): super(TestBasicSchemaLinks, self).setUp() properties = { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, } links = [ {'rel': 'up', 'href': '/menu'}, ] self.schema = glance.schema.Schema('basic', properties, links) def test_raw_json_schema(self): expected = { 'name': 'basic', 'properties': { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, }, 'links': [ {'rel': 'up', 'href': '/menu'}, ], 'additionalProperties': False, } self.assertEqual(expected, self.schema.raw()) class TestPermissiveSchema(test_utils.BaseTestCase): def setUp(self): super(TestPermissiveSchema, self).setUp() properties = { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, } self.schema = glance.schema.PermissiveSchema('permissive', properties) def test_validate_with_additional_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} self.schema.validate(obj) # No exception raised def test_validate_rejects_non_string_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'grits': 1000} self.assertRaises(exception.InvalidObject, self.schema.validate, obj) def test_filter_passes_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 
'scrambled', 'bacon': 'crispy'} filtered = self.schema.filter(obj) self.assertEqual(obj, filtered) def test_raw_json_schema(self): expected = { 'name': 'permissive', 'properties': { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, }, 'additionalProperties': {'type': 'string'}, } self.assertEqual(expected, self.schema.raw()) class TestCollectionSchema(test_utils.BaseTestCase): def test_raw_json_schema(self): item_properties = {'cheese': {'type': 'string'}} item_schema = glance.schema.Schema('mouse', item_properties) collection_schema = glance.schema.CollectionSchema('mice', item_schema) expected = { 'name': 'mice', 'properties': { 'mice': { 'type': 'array', 'items': item_schema.raw(), }, 'first': {'type': 'string'}, 'next': {'type': 'string'}, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'first', 'href': '{first}'}, {'rel': 'next', 'href': '{next}'}, {'rel': 'describedby', 'href': '{schema}'}, ], } self.assertEqual(expected, collection_schema.raw()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_scrubber.py0000664000175000017500000001542300000000000021655 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from unittest.mock import patch import uuid import glance_store from oslo_config import cfg from glance.common import exception from glance.db.sqlalchemy import api as db_api from glance import scrubber from glance.tests import utils as test_utils CONF = cfg.CONF class TestScrubber(test_utils.BaseTestCase): def setUp(self): super(TestScrubber, self).setUp() glance_store.register_opts(CONF) self.config(group='glance_store', default_store='file', filesystem_store_datadir=self.test_dir) glance_store.create_stores() def tearDown(self): # These globals impact state outside of this test class, kill them. scrubber._file_queue = None scrubber._db_queue = None super(TestScrubber, self).tearDown() def _scrubber_cleanup_with_store_delete_exception(self, ex): uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' scrub = scrubber.Scrubber(glance_store) with patch.object(glance_store, "delete_from_backend") as _mock_delete: _mock_delete.side_effect = ex scrub._scrub_image(id, [(id, '-', uri)]) @mock.patch.object(db_api, "image_get") def test_store_delete_successful(self, mock_image_get): uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' scrub = scrubber.Scrubber(glance_store) with patch.object(glance_store, "delete_from_backend"): scrub._scrub_image(id, [(id, '-', uri)]) @mock.patch.object(db_api, "image_get") def test_store_delete_store_exceptions(self, mock_image_get): # While scrubbing image data, all store exceptions, other than # NotFound, cause image scrubbing to fail. Essentially, no attempt # would be made to update the status of image. 
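# NOTE: an illustrative sketch of the delete policy described in the
# comment above, using the glance_store exceptions already imported at
# the top of this module; the helper name is hypothetical and this is
# not the scrubber's actual implementation.
def _sketch_delete_location(delete_from_backend, uri):
    try:
        delete_from_backend(uri)
    except glance_store.NotFound:
        # The image data is already gone, which is the desired end
        # state, so treat it as a successful scrub.
        return True
    except glance_store.GlanceStoreException:
        # Any other store failure aborts the scrub for this image and
        # the image status is left untouched.
        return False
    return True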
uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' ex = glance_store.GlanceStoreException() scrub = scrubber.Scrubber(glance_store) with patch.object(glance_store, "delete_from_backend") as _mock_delete: _mock_delete.side_effect = ex scrub._scrub_image(id, [(id, '-', uri)]) @mock.patch.object(db_api, "image_get") def test_store_delete_notfound_exception(self, mock_image_get): # While scrubbing image data, a NotFound exception is ignored and # image scrubbing succeeds uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' ex = glance_store.NotFound(message='random') scrub = scrubber.Scrubber(glance_store) with patch.object(glance_store, "delete_from_backend") as _mock_delete: _mock_delete.side_effect = ex scrub._scrub_image(id, [(id, '-', uri)]) def test_scrubber_exits(self): # Checks that the Scrubber exits when it is not able to fetch jobs # from the queue scrub = scrubber.Scrubber(glance_store) with patch.object(scrubber.ScrubDBQueue, 'get_all_locations', side_effect=exception.NotFound): self.assertRaises(exception.FailedToGetScrubberJobs, scrub._get_delete_jobs) @mock.patch.object(db_api, "image_restore") def test_scrubber_revert_image_status(self, mock_image_restore): scrub = scrubber.Scrubber(glance_store) scrub.revert_image_status('fake_id') mock_image_restore.side_effect = exception.ImageNotFound self.assertRaises(exception.ImageNotFound, scrub.revert_image_status, 'fake_id') mock_image_restore.side_effect = exception.Conflict self.assertRaises(exception.Conflict, scrub.revert_image_status, 'fake_id') class TestScrubDBQueue(test_utils.BaseTestCase): def setUp(self): super(TestScrubDBQueue, self).setUp() def _create_image_list(self, count): images = [] for x in range(count): images.append({'id': x}) return images def test_get_all_images(self): scrub_queue = scrubber.ScrubDBQueue() images = self._create_image_list(15) image_pager = ImagePager(images) def make_get_images_detailed(pager): def mock_get_images_detailed(ctx, filters, marker=None, limit=None): return pager() return mock_get_images_detailed with patch.object(db_api, 'image_get_all') as ( _mock_get_images_detailed): _mock_get_images_detailed.side_effect = ( make_get_images_detailed(image_pager)) actual = list(scrub_queue._get_all_images()) self.assertEqual(images, actual) def test_get_all_images_paged(self): scrub_queue = scrubber.ScrubDBQueue() images = self._create_image_list(15) image_pager = ImagePager(images, page_size=4) def make_get_images_detailed(pager): def mock_get_images_detailed(ctx, filters, marker=None, limit=None): return pager() return mock_get_images_detailed with patch.object(db_api, 'image_get_all') as ( _mock_get_images_detailed): _mock_get_images_detailed.side_effect = ( make_get_images_detailed(image_pager)) actual = list(scrub_queue._get_all_images()) self.assertEqual(images, actual) class ImagePager(object): def __init__(self, images, page_size=0): image_count = len(images) if page_size == 0 or page_size > image_count: page_size = image_count self.image_batches = [] start = 0 while start < image_count: self.image_batches.append(images[start: start + page_size]) start += page_size if (image_count - start) < page_size: page_size = image_count - start def __call__(self): if len(self.image_batches) == 0: return [] else: return self.image_batches.pop(0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0
glance-29.0.0/glance/tests/unit/test_sqlite_migration.py0000664000175000017500000001710700000000000023421 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from contextlib import contextmanager import os import sqlite3 import tempfile import time from unittest import mock import uuid from oslo_config import cfg from glance import sqlite_migration import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF FAKE_IMAGE_1 = str(uuid.uuid4()) class TestMigrate(test_utils.BaseTestCase): def _store_dir(self, store): return os.path.join(self.test_dir, store) def setUp(self): super(TestMigrate, self).setUp() self.config(worker_self_reference_url='http://worker1.example.com') fd, self.db = tempfile.mkstemp(suffix=".db") os.close(fd) self.db_api = unit_test_utils.FakeDB(initialize=False) self.migrate = sqlite_migration.Migrate(self.db, self.db_api) self.addCleanup(self.drop_db) def drop_db(self): if os.path.exists(self.db): os.remove(self.db) def create_db(self): conn = sqlite3.connect(self.db, check_same_thread=False, factory=sqlite3.Connection) conn.executescript(""" CREATE TABLE IF NOT EXISTS cached_images ( image_id TEXT PRIMARY KEY, last_accessed REAL DEFAULT 0.0, last_modified REAL DEFAULT 0.0, size INTEGER DEFAULT 0, hits INTEGER DEFAULT 0, checksum TEXT ); """) conn.close() @contextmanager def get_db(self): """ Returns a context manager that produces a database connection that self-closes and calls rollback if an error occurs while using the database connection """ conn = sqlite3.connect(self.db, check_same_thread=False, factory=sqlite3.Connection) conn.row_factory = sqlite3.Row conn.text_factory = str conn.execute('PRAGMA synchronous = NORMAL') conn.execute('PRAGMA count_changes = OFF') conn.execute('PRAGMA temp_store = MEMORY') try: yield conn except sqlite3.DatabaseError: conn.rollback() finally: conn.close() def initialize_fake_cache_details(self): with self.get_db() as sq_db: filesize = 100 now = time.time() sq_db.execute("""INSERT INTO cached_images (image_id, last_accessed, last_modified, hits, size) VALUES (?, ?, ?, ?, ?)""", (FAKE_IMAGE_1, now, now, 0, filesize)) sq_db.commit() def test_migrate_if_required_false(self): self.config(image_cache_driver="sqlite") self.assertFalse(sqlite_migration.migrate_if_required()) def test_migrate_if_required_cache_disabled(self): self.config(flavor="keystone", group="paste_deploy") self.config(image_cache_driver="centralized_db") self.assertFalse(sqlite_migration.migrate_if_required()) @mock.patch('os.path.exists') @mock.patch('os.path.join', new=mock.MagicMock()) def test_migrate_if_required_db_not_found(self, mock_exists): mock_exists.return_value = False self.config(flavor="keystone+cache", group="paste_deploy") self.config(image_cache_driver="centralized_db") with mock.patch.object(sqlite_migration, 'LOG') as mock_log: sqlite_migration.migrate_if_required() mock_log.debug.assert_called_once_with( 'SQLite caching database not located, 
skipping migration') def test_migrate_empty_db(self): with mock.patch.object(sqlite_migration, 'LOG') as mock_log: self.migrate.migrate() expected_calls = [ mock.call('Adding local node reference %(node)s in ' 'centralized db', {'node': 'http://worker1.example.com'}), mock.call('Connecting to SQLite db %s', self.db), ] mock_log.debug.assert_has_calls(expected_calls) def test_migrate_duplicate_node_reference(self): self.migrate.migrate() with mock.patch.object(sqlite_migration, 'LOG') as mock_log: # Ensure to call migrate again for duplicating node reference self.migrate.migrate() expected_calls = [ mock.call('Adding local node reference %(node)s in ' 'centralized db', {'node': 'http://worker1.example.com'}), mock.call('Node reference %(node)s is already recorded, ' 'ignoring it', {'node': 'http://worker1.example.com'}), mock.call('Connecting to SQLite db %s', self.db), ] mock_log.debug.assert_has_calls(expected_calls) def test_migrate_record_exists_in_centralized_db(self): self.create_db() self.initialize_fake_cache_details() with mock.patch.object(sqlite_migration, 'LOG') as mock_log: with mock.patch.object( self.db_api, 'is_image_cached_for_node') as mock_call: mock_call.return_value = True self.migrate.migrate() expected_calls = [ mock.call('Adding local node reference %(node)s in ' 'centralized db', {'node': 'http://worker1.example.com'}), mock.call('Connecting to SQLite db %s', self.db), mock.call('Skipping migrating image %(uuid)s from SQLite to ' 'Centralized db for node %(node)s as it is present ' 'in Centralized db.', {'uuid': FAKE_IMAGE_1, 'node': 'http://worker1.example.com'}) ] mock_log.debug.assert_has_calls(expected_calls) def test_migrate(self): self.config(image_cache_driver="centralized_db") self.create_db() self.initialize_fake_cache_details() with mock.patch.object(sqlite_migration, 'LOG') as mock_log: self.migrate.migrate() expected_calls = [ mock.call('Adding local node reference %(node)s in ' 'centralized db', {'node': 'http://worker1.example.com'}), mock.call('Connecting to SQLite db %s', self.db), mock.call('Migrating image %s from SQLite to Centralized db.', FAKE_IMAGE_1), mock.call('Image %(uuid)s is migrated to centralized db for ' 'node %(node)s', {'uuid': FAKE_IMAGE_1, 'node': 'http://worker1.example.com'}), mock.call('Deleting image %s from SQLite db', FAKE_IMAGE_1), mock.call('Migrated %d records from SQLite db to ' 'Centralized db', 1) ] mock_log.debug.assert_has_calls(expected_calls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_store_image.py0000664000175000017500000013573300000000000022353 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
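# NOTE: a condensed sketch of the read -> copy -> delete ordering that
# the expected log calls in the TestMigrate cases above imply, with a
# hypothetical insert_fn standing in for the centralized-db API; this
# is illustrative only, not the glance.sqlite_migration implementation.
import sqlite3

def _sketch_migrate_cached_images(sqlite_path, insert_fn):
    conn = sqlite3.connect(sqlite_path)
    conn.row_factory = sqlite3.Row
    migrated = 0
    try:
        rows = conn.execute('SELECT * FROM cached_images').fetchall()
        for row in rows:
            # Copy the record into the centralized db first ...
            insert_fn(dict(row))
            # ... then remove it from the worker-local SQLite db.
            conn.execute('DELETE FROM cached_images WHERE image_id = ?',
                         (row['image_id'],))
            migrated += 1
        conn.commit()
    finally:
        conn.close()
    return migrated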
from cryptography import exceptions as crypto_exception from cursive import exception as cursive_exception from cursive import signature_utils import glance_store from unittest import mock from glance.common import exception import glance.location from glance.tests.unit import base as unit_test_base from glance.tests.unit import utils as unit_test_utils from glance.tests import utils BASE_URI = 'http://storeurl.com/container' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '228c6da5-29cd-4d67-9457-ed632e083fc0' class ImageRepoStub(object): def add(self, image): return image def save(self, image, from_state=None): return image class ImageStub(object): def __init__(self, image_id, status=None, locations=None, visibility=None, extra_properties=None, virtual_size=0): self.image_id = image_id self.status = status self.locations = locations or [] self.visibility = visibility self.size = None self.extra_properties = extra_properties or {} self.os_hash_algo = None self.os_hash_value = None self.checksum = None self.disk_format = 'raw' self.container_format = 'bare' self.virtual_size = virtual_size def delete(self): self.status = 'deleted' def get_member_repo(self): return FakeMemberRepo(self, [TENANT1, TENANT2]) class ImageFactoryStub(object): def new_image(self, image_id=None, name=None, visibility='private', min_disk=0, min_ram=0, protected=False, owner=None, disk_format=None, container_format=None, extra_properties=None, tags=None, **other_args): return ImageStub(image_id, visibility=visibility, extra_properties=extra_properties, **other_args) class FakeMemberRepo(object): def __init__(self, image, tenants=None): self.image = image self.factory = glance.domain.ImageMemberFactory() self.tenants = tenants or [] def list(self, *args, **kwargs): return [self.factory.new_image_member(self.image, tenant) for tenant in self.tenants] def add(self, member): self.tenants.append(member.member_id) def remove(self, member): self.tenants.remove(member.member_id) class TestStoreMultiBackends(utils.BaseTestCase): def setUp(self): self.store_api = unit_test_utils.FakeStoreAPI() self.store_utils = unit_test_utils.FakeStoreUtils(self.store_api) self.enabled_backends = { "ceph1": "rbd", "ceph2": "rbd" } super(TestStoreMultiBackends, self).setUp() self.config(enabled_backends=self.enabled_backends) @mock.patch("glance.location.signature_utils.get_verifier") def test_set_data_calls_upload_to_store(self, msig): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'VALID' } image_stub = ImageStub(UUID2, status='queued', locations=[], extra_properties=extra_properties, virtual_size=4) image_stub.disk_format = 'iso' image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) with mock.patch.object(image, "_upload_to_store") as mloc: image.set_data('YYYY', 4, backend='ceph1') msig.assert_called_once_with(context=context, img_signature_certificate_uuid='UUID', img_signature_hash_method='METHOD', img_signature='VALID', img_signature_key_type='TYPE') mloc.assert_called_once_with('YYYY', msig.return_value, 'ceph1', 4) self.assertEqual('active', image.status) def test_image_set_data(self): store_api = mock.MagicMock() 
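# NOTE: add_with_multihash is mocked throughout these tests to return
# a 5-tuple which the location proxy consumes as (location_url,
# bytes_written, checksum, multihash, location_metadata). The stub
# below only mirrors that shape for readability; it is an assumption
# sketched from the assertions in this class, not glance_store
# documentation.
def _sketch_add_with_multihash(backend, data):
    # A fake writer: pretend every chunk landed in an rbd store.
    payload = b''.join(chunk.encode() for chunk in data)
    return ('rbd://%s' % backend, len(payload), 'Z', 'MH',
            {'backend': backend})

assert _sketch_add_with_multihash('ceph1', 'YYYY')[:2] == ('rbd://ceph1', 4)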
store_api.add_with_multihash.return_value = ( "rbd://ceph1", 4, "Z", "MH", {"backend": "ceph1"}) context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) image.set_data('YYYY', 4, backend='ceph1') self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual("rbd://ceph1", image.locations[0]['url']) self.assertEqual({"backend": "ceph1"}, image.locations[0]['metadata']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) @mock.patch('glance.location.LOG') def test_image_set_data_valid_signature(self, mock_log): store_api = mock.MagicMock() store_api.add_with_multihash.return_value = ( "rbd://ceph1", 4, "Z", "MH", {"backend": "ceph1"}) context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'VALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.mock_object(signature_utils, 'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) image.set_data('YYYY', 4, backend='ceph1') self.assertEqual('active', image.status) call = mock.call('Successfully verified signature for image %s', UUID2) mock_log.info.assert_has_calls([call]) @mock.patch("glance.location.signature_utils.get_verifier") def test_image_set_data_invalid_signature(self, msig): msig.return_value.verify.side_effect = \ crypto_exception.InvalidSignature store_api = mock.MagicMock() store_api.add_with_multihash.return_value = ( "rbd://ceph1", 4, "Z", "MH", {"backend": "ceph1"}) context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'INVALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) self.assertRaises(cursive_exception.SignatureVerificationError, image.set_data, 'YYYY', 4, backend='ceph1') class TestStoreImage(utils.BaseTestCase): def setUp(self): locations = [{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}] self.image_stub = ImageStub(UUID1, 'active', locations) self.store_api = unit_test_utils.FakeStoreAPI() self.store_utils = unit_test_utils.FakeStoreUtils(self.store_api) super(TestStoreImage, self).setUp() def test_image_delete(self): image = glance.location.ImageProxy(self.image_stub, {}, self.store_api, self.store_utils) location = image.locations[0] self.assertEqual('active', image.status) self.store_api.get_from_backend(location['url'], context={}) image.delete() self.assertEqual('deleted', image.status) self.assertRaises(glance_store.NotFound, self.store_api.get_from_backend, location['url'], {}) def test_image_get_data(self): image = glance.location.ImageProxy(self.image_stub, {}, self.store_api, self.store_utils) self.assertEqual('XXX', image.get_data()) def test_image_get_data_from_second_location(self): def fake_get_from_backend(self, location, offset=0, chunk_size=None, context=None): if UUID1 in location: raise Exception('not allow download from %s' % location) else: return self.data[location] image1 = glance.location.ImageProxy(self.image_stub, {}, 
self.store_api, self.store_utils) self.assertEqual('XXX', image1.get_data()) # Multiple location support context = glance.context.RequestContext(user=USER1) (image2, image_stub2) = self._add_image(context, UUID2, 'ZZZ', 3) location_data = image2.locations[0] with mock.patch("glance.location.store") as mock_store: mock_store.get_size_from_uri_and_backend.return_value = 3 image1.locations.append(location_data) self.assertEqual(2, len(image1.locations)) self.assertEqual(UUID2, location_data['url']) self.mock_object(unit_test_utils.FakeStoreAPI, 'get_from_backend', fake_get_from_backend) # This time, image1.get_data() returns the data wrapped in a # LimitingReader|CooperativeReader|InfoWrapper pipeline, so # peeking under the hood of those objects to get at the # underlying string. self.assertEqual('ZZZ', image1.get_data().data.fd._source) image1.locations.pop(0) self.assertEqual(1, len(image1.locations)) image2.delete() def test_image_set_data(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) # We are going to pass an iterable data source, so use the # FakeStoreAPIReader that actually reads from that data store_api = unit_test_utils.FakeStoreAPIReader() image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) image.set_data(iter(['YYYY']), 4) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) self.assertEqual(4, image.virtual_size) def test_image_set_data_inspector_no_match(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image_stub.disk_format = 'qcow2' # We are going to pass an iterable data source, so use the # FakeStoreAPIReader that actually reads from that data store_api = unit_test_utils.FakeStoreAPIReader() image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) image.set_data(iter(['YYYY']), 4) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) self.assertEqual(0, image.virtual_size) @mock.patch('glance.common.format_inspector.QcowInspector.virtual_size', new_callable=mock.PropertyMock) @mock.patch('glance.common.format_inspector.QcowInspector.format_match', new_callable=mock.PropertyMock) def test_image_set_data_inspector_virtual_size_failure(self, mock_fm, mock_vs): # Force our format to match mock_fm.return_value = True # Make virtual_size fail in some unexpected way mock_vs.side_effect = ValueError('some error') context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image_stub.disk_format = 'qcow2' # We are going to pass an iterable data source, so use the # FakeStoreAPIReader that actually reads from that data store_api = unit_test_utils.FakeStoreAPIReader() image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) # Make sure set_data proceeds even though the format clearly # does not match image.set_data(iter(['YYYY']), 4) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) self.assertEqual(0, 
image.virtual_size) @mock.patch('glance.common.format_inspector.get_inspector') def test_image_set_data_inspector_not_needed(self, mock_gi): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image_stub.virtual_size = 123 image_stub.disk_format = 'qcow2' # We are going to pass an iterable data source, so use the # FakeStoreAPIReader that actually reads from that data store_api = unit_test_utils.FakeStoreAPIReader() image = glance.location.ImageProxy(image_stub, context, store_api, self.store_utils) image.set_data(iter(['YYYY']), 4) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) self.assertEqual(123, image.virtual_size) # If the image already had virtual_size set (i.e. we're setting # a new location), we should not re-calculate the value. mock_gi.assert_not_called() def test_image_set_data_location_metadata(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) loc_meta = {'key': 'value5032'} store_api = unit_test_utils.FakeStoreAPI(store_metadata=loc_meta) store_utils = unit_test_utils.FakeStoreUtils(store_api) image = glance.location.ImageProxy(image_stub, context, store_api, store_utils) image.set_data('YYYY', 4) self.assertEqual(4, image.size) location_data = image.locations[0] self.assertEqual(UUID2, location_data['url']) self.assertEqual(loc_meta, location_data['metadata']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) image.delete() self.assertEqual(image.status, 'deleted') self.assertRaises(glance_store.NotFound, self.store_api.get_from_backend, image.locations[0]['url'], {}) def test_image_set_data_unknown_size(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image_stub.disk_format = 'iso' image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', None) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) image.delete() self.assertEqual(image.status, 'deleted') self.assertRaises(glance_store.NotFound, self.store_api.get_from_backend, image.locations[0]['url'], context={}) @mock.patch('glance.location.LOG') def test_image_set_data_valid_signature(self, mock_log): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'VALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.mock_object(signature_utils, 'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual('active', image.status) mock_log.info.assert_any_call( 'Successfully verified signature for image %s', UUID2) def test_image_set_data_invalid_signature(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'INVALID' } image_stub = ImageStub(UUID2, 
status='queued', extra_properties=extra_properties) self.mock_object(signature_utils, 'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) with mock.patch.object(self.store_api, 'delete_from_backend') as mock_delete: self.assertRaises(cursive_exception.SignatureVerificationError, image.set_data, 'YYYY', 4) mock_delete.assert_called() def test_image_set_data_invalid_signature_missing_metadata(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'INVALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.mock_object(signature_utils, 'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) # Image is still active, since invalid signature was ignored self.assertEqual('active', image.status) def _add_image(self, context, image_id, data, len): image_stub = ImageStub(image_id, status='queued', locations=[]) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data(data, len) self.assertEqual(len, image.size) # NOTE(markwash): FakeStore returns image_id for location location = {'url': image_id, 'metadata': {}, 'status': 'active'} self.assertEqual([location], image.locations) self.assertEqual([location], image_stub.locations) self.assertEqual('active', image.status) return (image, image_stub) def test_image_change_append_invalid_location_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.append, location_bad) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_append_invalid_location_metatdata(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) # Using only one test rule here is enough to make sure # 'store.check_location_metadata()' can be triggered # in Location proxy layer. Complete test rule for # 'store.check_location_metadata()' testing please # check below cases within 'TestStoreMetaDataChecker'. 
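# NOTE: an illustrative validator in the spirit of
# store.check_location_metadata(), assuming only the behaviour these
# tests depend on: metadata must be a dict whose values are unicode
# strings (or nested dicts/lists of them), so the bytes value used for
# location_bad below is rejected. Hypothetical helper, not the
# glance_store implementation.
def _sketch_check_location_metadata(val, key=''):
    if isinstance(val, dict):
        for k, v in val.items():
            _sketch_check_location_metadata(v, key=k)
    elif isinstance(val, list):
        for v in val:
            _sketch_check_location_metadata(v, key=key)
    elif not isinstance(val, str):
        raise ValueError('Invalid metadata %r for key %r' % (val, key))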
location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image1.locations.append, location_bad) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_append_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.append(location3) self.assertEqual([location2, location3], image_stub1.locations) self.assertEqual([location2, location3], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_pop_location(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.append(location3) self.assertEqual([location2, location3], image_stub1.locations) self.assertEqual([location2, location3], image1.locations) image1.locations.pop() self.assertEqual([location2], image_stub1.locations) self.assertEqual([location2], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_extend_invalid_locations_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.extend, [location_bad]) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_extend_invalid_locations_metadata(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image1.locations.extend, [location_bad]) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_extend_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = 
self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.extend([location3]) self.assertEqual([location2, location3], image_stub1.locations) self.assertEqual([location2, location3], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_remove_location(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} location_bad = {'url': 'unknown://location', 'metadata': {}} image1.locations.extend([location3]) image1.locations.remove(location2) self.assertEqual([location3], image_stub1.locations) self.assertEqual([location3], image1.locations) self.assertRaises(ValueError, image1.locations.remove, location_bad) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_delete_location(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) del image1.locations[0] self.assertEqual([], image_stub1.locations) self.assertEqual(0, len(image1.locations)) self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) image1.delete() def test_image_change_insert_invalid_location_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.insert, 0, location_bad) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_insert_invalid_location_metadata(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image1.locations.insert, 0, location_bad) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_insert_location(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 
'active'} image1.locations.insert(0, location3) self.assertEqual([location3, location2], image_stub1.locations) self.assertEqual([location3, location2], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_delete_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} image1.locations.insert(0, location3) del image1.locations[0:100] self.assertEqual([], image_stub1.locations) self.assertEqual(0, len(image1.locations)) self.assertRaises(exception.BadStoreUri, image1.locations.insert, 0, location2) self.assertRaises(exception.BadStoreUri, image2.locations.insert, 0, location3) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_adding_invalid_location_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) image_stub1 = ImageStub('fake_image_id', status='queued', locations=[]) image1 = glance.location.ImageProxy(image_stub1, context, self.store_api, self.store_utils) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.__iadd__, [location_bad]) self.assertEqual([], image_stub1.locations) self.assertEqual([], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_adding_invalid_location_metadata(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) image_stub2 = ImageStub('fake_image_id', status='queued', locations=[]) image2 = glance.location.ImageProxy(image_stub2, context, self.store_api, self.store_utils) location_bad = {'url': UUID2, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image2.locations.__iadd__, [location_bad]) self.assertEqual([], image_stub2.locations) self.assertEqual([], image2.locations) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_adding_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} with mock.patch("glance.location.store") as mock_store: mock_store.get_size_from_uri_and_backend.return_value = 4 image3.locations += [location2, location3] self.assertEqual([location2, location3], image_stub3.locations) 
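# NOTE: a minimal sketch of the list-proxy pattern that the location
# tests in this class exercise: every mutation path (append, extend,
# insert, +=) funnels through a single validation hook before the
# underlying list changes. The class below is hypothetical, not
# glance.location's internal locations proxy.
class _SketchLocationList(list):
    def __init__(self, validate):
        super(_SketchLocationList, self).__init__()
        self._validate = validate

    def append(self, value):
        self._validate(value)
        super(_SketchLocationList, self).append(value)

    def extend(self, values):
        for value in values:
            self.append(value)

    def insert(self, index, value):
        self._validate(value)
        super(_SketchLocationList, self).insert(index, value)

    def __iadd__(self, values):
        self.extend(values)
        return self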
self.assertEqual([location2, location3], image3.locations) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_get_location_index(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} with mock.patch("glance.location.store") as mock_store: mock_store.get_size_from_uri_and_backend.return_value = 4 image3.locations += [location2, location3] self.assertEqual(1, image_stub3.locations.index(location3)) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_get_location_by_index(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} with mock.patch("glance.location.store") as mock_store: mock_store.get_size_from_uri_and_backend.return_value = 4 image3.locations += [location2, location3] self.assertEqual(1, image_stub3.locations.index(location3)) self.assertEqual(location2, image_stub3.locations[0]) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_checking_location_exists(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} location_bad = {'url': 'unknown://location', 'metadata': {}} with mock.patch("glance.location.store") as mock_store: mock_store.get_size_from_uri_and_backend.return_value = 4 image3.locations += [location2, location3] self.assertIn(location3, image_stub3.locations) self.assertNotIn(location_bad, image_stub3.locations) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_reverse_locations_order(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, 
len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) with mock.patch("glance.location.store") as mock_store: mock_store.get_size_from_uri_and_backend.return_value = 4 image3.locations += [location2, location3] image_stub3.locations.reverse() self.assertEqual([location3, location2], image_stub3.locations) self.assertEqual([location3, location2], image3.locations) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() class TestStoreImageRepo(utils.BaseTestCase): def setUp(self): super(TestStoreImageRepo, self).setUp() self.store_api = unit_test_utils.FakeStoreAPI() store_utils = unit_test_utils.FakeStoreUtils(self.store_api) self.image_stub = ImageStub(UUID1) self.image = glance.location.ImageProxy(self.image_stub, {}, self.store_api, store_utils) self.image_repo_stub = ImageRepoStub() self.image_repo = glance.location.ImageRepoProxy(self.image_repo_stub, {}, self.store_api, store_utils) patcher = mock.patch("glance.location._get_member_repo_for_store", self.get_fake_member_repo) patcher.start() self.addCleanup(patcher.stop) self.fake_member_repo = FakeMemberRepo(self.image, [TENANT1, TENANT2]) self.image_member_repo = glance.location.ImageMemberRepoProxy( self.fake_member_repo, self.image, {}, self.store_api) def get_fake_member_repo(self, image, context, db_api, store_api): return FakeMemberRepo(self.image, [TENANT1, TENANT2]) def test_add_updates_acls(self): self.image_stub.locations = [{'url': 'foo', 'metadata': {}, 'status': 'active'}, {'url': 'bar', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'public' self.image_repo.add(self.image) self.assertTrue(self.store_api.acls['foo']['public']) self.assertEqual([], self.store_api.acls['foo']['read']) self.assertEqual([], self.store_api.acls['foo']['write']) self.assertTrue(self.store_api.acls['bar']['public']) self.assertEqual([], self.store_api.acls['bar']['read']) self.assertEqual([], self.store_api.acls['bar']['write']) def test_add_ignores_acls_if_no_locations(self): self.image_stub.locations = [] self.image_stub.visibility = 'public' self.image_repo.add(self.image) self.assertEqual(0, len(self.store_api.acls)) def test_save_updates_acls(self): self.image_stub.locations = [{'url': 'foo', 'metadata': {}, 'status': 'active'}] self.image_repo.save(self.image) self.assertIn('foo', self.store_api.acls) def test_add_fetches_members_if_private(self): self.image_stub.locations = [{'url': 'glue', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' self.image_repo.add(self.image) self.assertIn('glue', self.store_api.acls) acls = self.store_api.acls['glue'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT1, TENANT2], acls['read']) def test_save_fetches_members_if_private(self): self.image_stub.locations = [{'url': 'glue', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' self.image_repo.save(self.image) self.assertIn('glue', self.store_api.acls) acls = self.store_api.acls['glue'] 
self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT1, TENANT2], acls['read']) def test_member_addition_updates_acls(self): self.image_stub.locations = [{'url': 'glug', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' membership = glance.domain.ImageMembership( UUID1, TENANT3, None, None, status='accepted') self.image_member_repo.add(membership) self.assertIn('glug', self.store_api.acls) acls = self.store_api.acls['glug'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT1, TENANT2, TENANT3], acls['read']) def test_member_removal_updates_acls(self): self.image_stub.locations = [{'url': 'glug', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' membership = glance.domain.ImageMembership( UUID1, TENANT1, None, None, status='accepted') self.image_member_repo.remove(membership) self.assertIn('glug', self.store_api.acls) acls = self.store_api.acls['glug'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT2], acls['read']) class TestImageFactory(unit_test_base.StoreClearingUnitTest): def setUp(self): super(TestImageFactory, self).setUp() store_api = unit_test_utils.FakeStoreAPI() store_utils = unit_test_utils.FakeStoreUtils(store_api) self.image_factory = glance.location.ImageFactoryProxy( ImageFactoryStub(), glance.context.RequestContext(user=USER1), store_api, store_utils) def test_new_image(self): image = self.image_factory.new_image() self.assertIsNone(image.image_id) self.assertIsNone(image.status) self.assertEqual('private', image.visibility) self.assertEqual([], image.locations) def test_new_image_with_location(self): locations = [{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}}] image = self.image_factory.new_image(locations=locations) self.assertEqual(locations, image.locations) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, self.image_factory.new_image, locations=[location_bad]) class TestStoreMetaDataChecker(utils.BaseTestCase): def test_empty(self): glance_store.check_location_metadata({}) def test_unicode(self): m = {'key': 'somevalue'} glance_store.check_location_metadata(m) def test_unicode_list(self): m = {'key': ['somevalue', '2']} glance_store.check_location_metadata(m) def test_unicode_dict(self): inner = {'key1': 'somevalue', 'key2': 'somevalue'} m = {'topkey': inner} glance_store.check_location_metadata(m) def test_unicode_dict_list(self): inner = {'key1': 'somevalue', 'key2': 'somevalue'} m = {'topkey': inner, 'list': ['somevalue', '2'], 'u': '2'} glance_store.check_location_metadata(m) def test_nested_dict(self): inner = {'key1': 'somevalue', 'key2': 'somevalue'} inner = {'newkey': inner} inner = {'anotherkey': inner} m = {'topkey': inner} glance_store.check_location_metadata(m) def test_simple_bad(self): m = {'key1': object()} self.assertRaises(glance_store.BackendException, glance_store.check_location_metadata, m) def test_list_bad(self): m = {'key1': ['somevalue', object()]} self.assertRaises(glance_store.BackendException, glance_store.check_location_metadata, m) def test_nested_dict_bad(self): inner = {'key1': 'somevalue', 'key2': object()} inner = {'newkey': inner} inner = {'anotherkey': inner} m = {'topkey': inner} self.assertRaises(glance_store.BackendException, glance_store.check_location_metadata, m) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
glance-29.0.0/glance/tests/unit/test_store_location.py0000664000175000017500000000622200000000000023067 0ustar00zuulzuul00000000000000# Copyright 2011-2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from unittest import mock from glance.common import exception from glance.common import store_utils import glance.location from glance.tests.unit import base CONF = {'default_store': 'file', 'swift_store_auth_address': 'localhost:8080', 'swift_store_container': 'glance', 'swift_store_user': 'user', 'swift_store_key': 'key', 'default_swift_reference': 'store_1' } class TestStoreLocation(base.StoreClearingUnitTest): class FakeImageProxy(object): size = None context = None store_api = mock.Mock() store_utils = store_utils def test_add_location_for_image_without_size(self): def fake_get_size_from_backend(uri, context=None): return 1 self.mock_object(glance_store, 'get_size_from_backend', fake_get_size_from_backend) with mock.patch('glance.location._check_image_location'): loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}} loc2 = {'url': 'file:///fake2.img.tar.gz', 'metadata': {}} # Test for insert location image1 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image1, []) locations.insert(0, loc2) self.assertEqual(1, image1.size) # Test for set_attr of _locations_proxy image2 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image2, [loc1]) locations[0] = loc2 self.assertIn(loc2, locations) self.assertEqual(1, image2.size) def test_add_location_with_restricted_sources(self): loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}} loc2 = {'url': 'swift+config:///xxx', 'metadata': {}} loc3 = {'url': 'filesystem:///foo.img.tar.gz', 'metadata': {}} # Test for insert location image1 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image1, []) self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc1) self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc3) self.assertNotIn(loc1, locations) self.assertNotIn(loc3, locations) # Test for set_attr of _locations_proxy image2 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image2, [loc1]) self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc2) self.assertNotIn(loc2, locations) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_test_utils.py0000664000175000017500000000244000000000000022240 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.tests import utils as test_utils class TestFakeData(test_utils.BaseTestCase): def test_via_read(self): fd = test_utils.FakeData(1024) data = [] for i in range(0, 1025, 256): chunk = fd.read(256) data.append(chunk) if not chunk: break self.assertEqual(5, len(data)) # Make sure we got a zero-length final read self.assertEqual(b'', data[-1]) # Make sure we only got 1024 bytes self.assertEqual(1024, len(b''.join(data))) def test_via_iter(self): data = b''.join(list(test_utils.FakeData(1024))) self.assertEqual(1024, len(data)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/test_versions.py0000664000175000017500000004417700000000000021726 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
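# Illustrative sketch (not the real glance.tests.utils.FakeData) of the
# contract the TestFakeData cases above verify: read(n) returns at most
# n bytes, a zero-length read marks EOF, and iteration drains whatever
# remains. All names here are hypothetical stand-ins.
class _SketchFakeData(object):
    def __init__(self, length):
        self._remaining = length

    def read(self, length=None):
        if length is None or length > self._remaining:
            length = self._remaining
        self._remaining -= length
        return b'0' * length

    def __iter__(self):
        while self._remaining:
            yield self.read(65536)

# Mirrors test_via_read: 1024 bytes in 256-byte chunks plus a final
# empty read that signals EOF.
fd = _SketchFakeData(1024)
chunks = []
while True:
    chunk = fd.read(256)
    chunks.append(chunk)
    if not chunk:
        break
assert len(chunks) == 5
assert chunks[-1] == b''
assert len(b''.join(chunks)) == 1024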
import http.client as http import ddt import webob from oslo_serialization import jsonutils from glance.api.middleware import version_negotiation from glance.api import versions from glance.tests.unit import base # make this public so it doesn't need to be repeated for the # functional tests def get_versions_list(url, enabled_backends=False, enabled_cache=False): image_versions = [ { 'id': 'v2.15', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.9', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.17', 'status': 'CURRENT', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.7', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.6', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.5', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.4', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.3', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.2', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, ] if enabled_backends: image_versions = [ { 'id': 'v2.15', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.13', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.12', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.11', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.10', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.9', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }, { 'id': 'v2.8', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], } ] + image_versions[2:] if enabled_cache: image_versions[0]['status'] = 'SUPPORTED' image_versions.insert(1, { 'id': 'v2.14', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }) image_versions.insert(0, { 'id': 'v2.16', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': '%s/v2/' % url}], }) return image_versions class VersionsTest(base.IsolatedUnitTest): """Test the version information returned from the API service.""" def test_get_version_list(self): req = webob.Request.blank('/', base_url='http://127.0.0.1:9292/') req.accept = 'application/json' self.config(bind_host='127.0.0.1', bind_port=9292) res = versions.Controller().index(req) self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list('http://127.0.0.1:9292') self.assertEqual(expected, results) self.config(enabled_backends='slow:one,fast:two') res = versions.Controller().index(req) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list('http://127.0.0.1:9292', enabled_backends=True) self.assertEqual(expected, results) self.config(image_cache_dir='/tmp/cache') res = versions.Controller().index(req) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list('http://127.0.0.1:9292', enabled_backends=True, enabled_cache=True) self.assertEqual(expected, 
results) def test_get_version_list_public_endpoint(self): req = webob.Request.blank('/', base_url='http://127.0.0.1:9292/') req.accept = 'application/json' self.config(bind_host='127.0.0.1', bind_port=9292, public_endpoint='https://example.com:9292') res = versions.Controller().index(req) self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list('https://example.com:9292') self.assertEqual(expected, results) self.config(enabled_backends='slow:one,fast:two') res = versions.Controller().index(req) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list('https://example.com:9292', enabled_backends=True) self.assertEqual(expected, results) self.config(image_cache_dir='/tmp/cache') res = versions.Controller().index(req) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list('https://example.com:9292', enabled_backends=True, enabled_cache=True) self.assertEqual(expected, results) def test_get_version_list_for_external_app(self): url = 'http://customhost:9292/app/api' req = webob.Request.blank('/', base_url=url) self.config(bind_host='127.0.0.1', bind_port=9292) res = versions.Controller().index(req) self.assertEqual(http.MULTIPLE_CHOICES, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list(url) self.assertEqual(expected, results) self.config(enabled_backends='slow:one,fast:two') res = versions.Controller().index(req) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list(url, enabled_backends=True) self.assertEqual(expected, results) self.config(image_cache_dir='/tmp/cache') res = versions.Controller().index(req) results = jsonutils.loads(res.body)['versions'] expected = get_versions_list(url, enabled_backends=True, enabled_cache=True) class VersionNegotiationTest(base.IsolatedUnitTest): def setUp(self): super(VersionNegotiationTest, self).setUp() self.middleware = version_negotiation.VersionNegotiationFilter(None) def test_request_url_v2(self): request = webob.Request.blank('/v2/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_0(self): request = webob.Request.blank('/v2.0/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_1(self): request = webob.Request.blank('/v2.1/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_2(self): request = webob.Request.blank('/v2.2/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_3(self): request = webob.Request.blank('/v2.3/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_4(self): request = webob.Request.blank('/v2.4/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_5(self): request = webob.Request.blank('/v2.5/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_6(self): request = webob.Request.blank('/v2.6/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_7(self): request = 
webob.Request.blank('/v2.7/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_9(self): request = webob.Request.blank('/v2.9/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_15(self): request = webob.Request.blank('/v2.15/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) # note: these need separate unsupported/supported tests to reset the # the memoized allowed_versions in the VersionNegotiationFilter instance def test_request_url_v2_8_default_unsupported(self): request = webob.Request.blank('/v2.8/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_8_enabled_supported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.8/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_10_default_unsupported(self): request = webob.Request.blank('/v2.10/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_10_enabled_supported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.10/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_11_default_unsupported(self): request = webob.Request.blank('/v2.11/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_11_enabled_supported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.11/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_12_default_unsupported(self): request = webob.Request.blank('/v2.12/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_12_enabled_supported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.12/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_13_default_unsupported(self): request = webob.Request.blank('/v2.13/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_13_enabled_supported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.13/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_14_default_unsupported(self): request = webob.Request.blank('/v2.14/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_14_enabled_supported(self): self.config(image_cache_dir='/tmp/cache') request = webob.Request.blank('/v2.14/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_16_default_unsupported(self): request = webob.Request.blank('/v2.16/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_16_enabled_supported(self): self.config(image_cache_dir='/tmp/cache') request = 
webob.Request.blank('/v2.16/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_17_default_supported(self): request = webob.Request.blank('/v2.17/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_17_enabled_supported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.17/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) # version 2.18 does not exist def test_request_url_v2_18_default_unsupported(self): request = webob.Request.blank('/v2.18/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v2_18_enabled_unsupported(self): self.config(enabled_backends='slow:one,fast:two') request = webob.Request.blank('/v2.18/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) @ddt.ddt class VersionsAndNegotiationTest(VersionNegotiationTest, VersionsTest): """ Test that versions mentioned in the versions response are correctly negotiated. """ def _get_list_of_version_ids(self, status): request = webob.Request.blank('/') request.accept = 'application/json' response = versions.Controller().index(request) v_list = jsonutils.loads(response.body)['versions'] return [v['id'] for v in v_list if v['status'] == status] def _assert_version_is_negotiated(self, version_id): request = webob.Request.blank("/%s/images" % version_id) self.middleware.process_request(request) major = version_id.split('.', 1)[0] expected = "/%s/images" % major self.assertEqual(expected, request.path_info) # the content of the version list depends on two configuration # options: # - CONF.enabled_backends # - CONF.image_cache_dir # So we need to check a bunch of combinations cache = '/var/cache' multistore = 'slow:one,fast:two' combos = ((None, None), (None, multistore), (cache, None), (cache, multistore)) @ddt.data(*combos) @ddt.unpack def test_current_is_negotiated(self, cache, multistore): # NOTE(rosmaita): Bug 1609571: the versions response was correct, but # the negotiation had not been updated for the CURRENT version. 
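# The config combinations below therefore assert that every version id
# the API advertises as CURRENT is actually routable by the middleware.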
self.config(enabled_backends=multistore) self.config(image_cache_dir=cache) to_check = self._get_list_of_version_ids('CURRENT') self.assertTrue(to_check) for version_id in to_check: self._assert_version_is_negotiated(version_id) @ddt.data(*combos) @ddt.unpack def test_supported_is_negotiated(self, cache, multistore): self.config(enabled_backends=multistore) self.config(image_cache_dir=cache) to_check = self._get_list_of_version_ids('SUPPORTED') for version_id in to_check: self._assert_version_is_negotiated(version_id) @ddt.data(*combos) @ddt.unpack def test_deprecated_is_negotiated(self, cache, multistore): self.config(enabled_backends=multistore) self.config(image_cache_dir=cache) to_check = self._get_list_of_version_ids('DEPRECATED') for version_id in to_check: self._assert_version_is_negotiated(version_id) @ddt.data(*combos) @ddt.unpack def test_experimental_is_negotiated(self, cache, multistore): self.config(enabled_backends=multistore) self.config(image_cache_dir=cache) to_check = self._get_list_of_version_ids('EXPERIMENTAL') for version_id in to_check: self._assert_version_is_negotiated(version_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/utils.py0000664000175000017500000003020200000000000020137 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography import exceptions as crypto_exception import glance_store as store from unittest import mock import urllib from oslo_config import cfg from oslo_policy import policy from glance.async_.flows._internal_plugins import base_download from glance.common import exception from glance.common import store_utils from glance.common import wsgi import glance.context import glance.db.simple.api as simple_db CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' USER2 = '0b3b3006-cb76-4517-ae32-51397e22c754' USER3 = '2hss8dkl-d8jh-88yd-uhs9-879sdjsd8skd' BASE_URI = 'http://storeurl.com/container' def sort_url_by_qs_keys(url): # NOTE(kragniz): this only sorts the keys of the query string of a url. # For example, an input of '/v2/tasks?sort_key=id&sort_dir=asc&limit=10' # returns '/v2/tasks?limit=10&sort_dir=asc&sort_key=id'. This is to prevent # non-deterministic ordering of the query string causing problems with unit # tests. 
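# parse_qsl's second positional argument (keep_blank_values=True) keeps
# empty values such as 'marker=' from being dropped in the round trip.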
parsed = urllib.parse.urlparse(url) queries = urllib.parse.parse_qsl(parsed.query, True) sorted_query = sorted(queries, key=lambda x: x[0]) encoded_sorted_query = urllib.parse.urlencode(sorted_query, True) url_parts = (parsed.scheme, parsed.netloc, parsed.path, parsed.params, encoded_sorted_query, parsed.fragment) return urllib.parse.urlunparse(url_parts) def get_fake_request(path='', method='POST', is_admin=False, user=USER1, roles=None, headers=None, tenant=TENANT1): if roles is None: roles = ['member', 'reader'] req = wsgi.Request.blank(path) req.method = method req.headers = {'x-openstack-request-id': 'my-req'} if headers is not None: req.headers.update(headers) kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': is_admin, } req.context = glance.context.RequestContext(**kwargs) return req def enforcer_from_rules(unparsed_rules): rules = policy.Rules.from_dict(unparsed_rules) enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) enforcer.set_rules(rules, overwrite=True) return enforcer def fake_get_size_from_backend(uri, context=None): return 1 def fake_get_verifier(context, img_signature_certificate_uuid, img_signature_hash_method, img_signature, img_signature_key_type): verifier = mock.Mock() if (img_signature is not None and img_signature == 'VALID'): verifier.verify.return_value = None else: ex = crypto_exception.InvalidSignature() verifier.verify.side_effect = ex return verifier def get_fake_context(user=USER1, tenant=TENANT1, roles=None, is_admin=False): if roles is None: roles = ['member'] kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': is_admin, } context = glance.context.RequestContext(**kwargs) return context class FakeDB(object): def __init__(self, initialize=True): self.reset() if initialize: self.init_db() @staticmethod def init_db(): images = [ {'id': UUID1, 'owner': TENANT1, 'status': 'queued', 'locations': [{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'queued'}], 'disk_format': 'raw', 'container_format': 'bare'}, {'id': UUID2, 'owner': TENANT1, 'status': 'queued', 'disk_format': 'raw', 'container_format': 'bare'}, ] [simple_db.image_create(None, image) for image in images] members = [ {'image_id': UUID1, 'member': TENANT1, 'can_share': True}, {'image_id': UUID1, 'member': TENANT2, 'can_share': False}, ] [simple_db.image_member_create(None, member) for member in members] simple_db.image_tag_set_all(None, UUID1, ['ping', 'pong']) @staticmethod def reset(): simple_db.reset() def __getattr__(self, key): return getattr(simple_db, key) class FakeStoreUtils(object): def __init__(self, store_api): self.store_api = store_api def safe_delete_from_backend(self, context, id, location): try: del self.store_api.data[location['url']] except KeyError: pass def schedule_delayed_delete_from_backend(self, context, id, location): pass def delete_image_location_from_backend(self, context, image_id, location): if CONF.delayed_delete: self.schedule_delayed_delete_from_backend(context, image_id, location) else: self.safe_delete_from_backend(context, image_id, location) def validate_external_location(self, uri): if uri and urllib.parse.urlparse(uri).scheme: return store_utils.validate_external_location(uri) else: return True class FakeStoreAPI(object): def __init__(self, store_metadata=None): self.data = { '%s/%s' % (BASE_URI, UUID1): ('XXX', 3), '%s/fake_location' % (BASE_URI): ('YYY', 3) } self.acls = {} if store_metadata is None: self.store_metadata = {} else: self.store_metadata = store_metadata def 
create_stores(self): pass def set_acls(self, uri, public=False, read_tenants=None, write_tenants=None, context=None): if read_tenants is None: read_tenants = [] if write_tenants is None: write_tenants = [] self.acls[uri] = { 'public': public, 'read': read_tenants, 'write': write_tenants, } def get_from_backend(self, location, offset=0, chunk_size=None, context=None): try: scheme = location[:location.find('/') - 1] if scheme == 'unknown': raise store.UnknownScheme(scheme=scheme) return self.data[location] except KeyError: raise store.NotFound(image=location) def get_size_from_backend(self, location, context=None): return self.get_from_backend(location, context=context)[1] def add_to_backend(self, conf, image_id, data, size, scheme=None, context=None, verifier=None): store_max_size = 7 current_store_size = 2 for location in self.data.keys(): if image_id in location: raise exception.Duplicate() if not size: # 'data' is a string wrapped in a LimitingReader|CooperativeReader # pipeline, so peek under the hood of those objects to get at the # string itself. size = len(data.data.fd) if (current_store_size + size) > store_max_size: raise exception.StorageFull() if context.user_id == USER2: raise exception.Forbidden() if context.user_id == USER3: raise exception.StorageWriteDenied() self.data[image_id] = (data, size) checksum = 'Z' return (image_id, size, checksum, self.store_metadata) def add_to_backend_with_multihash( self, conf, image_id, data, size, hashing_algo, scheme=None, context=None, verifier=None): store_max_size = 7 current_store_size = 2 for location in self.data.keys(): if image_id in location: raise exception.Duplicate() if not size: # 'data' is a string wrapped in a LimitingReader|CooperativeReader # pipeline, so peek under the hood of those objects to get at the # string itself. 
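# (unlike add_to_backend above, the multihash variant reaches one level
# deeper, via ._source, to find the raw bytes)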
size = len(data.data.fd._source) if (current_store_size + size) > store_max_size: raise exception.StorageFull() if context.user_id == USER2: raise exception.Forbidden() if context.user_id == USER3: raise exception.StorageWriteDenied() self.data[image_id] = (data, size) checksum = 'Z' multihash = 'ZZ' return (image_id, size, checksum, multihash, self.store_metadata) def check_location_metadata(self, val, key=''): store.check_location_metadata(val) def delete_from_backend(self, uri, context=None): pass class FakeStoreAPIReader(FakeStoreAPI): """A store API that actually reads from the data pipe.""" def add_to_backend_with_multihash(self, conf, image_id, data, size, hashing_algo, scheme=None, context=None, verifier=None): for chunk in data: pass return super(FakeStoreAPIReader, self).add_to_backend_with_multihash( conf, image_id, data, size, hashing_algo, scheme=scheme, context=context, verifier=verifier) class FakePolicyEnforcer(object): def __init__(self, *_args, **kwargs): self.rules = {} def enforce(self, _ctxt, action, target=None, **kwargs): """Raise Forbidden if a rule for given action is set to false.""" if self.rules.get(action) is False: raise exception.Forbidden() def set_rules(self, rules): self.rules = rules class FakeNotifier(object): def __init__(self, *_args, **kwargs): self.log = [] def _notify(self, event_type, payload, level): log = { 'notification_type': level, 'event_type': event_type, 'payload': payload } self.log.append(log) def warn(self, event_type, payload): self._notify(event_type, payload, 'WARN') def info(self, event_type, payload): self._notify(event_type, payload, 'INFO') def error(self, event_type, payload): self._notify(event_type, payload, 'ERROR') def debug(self, event_type, payload): self._notify(event_type, payload, 'DEBUG') def critical(self, event_type, payload): self._notify(event_type, payload, 'CRITICAL') def get_logs(self): return self.log class FakeGateway(object): def __init__(self, image_factory=None, image_member_factory=None, image_repo=None, task_factory=None, task_repo=None): self.image_factory = image_factory self.image_member_factory = image_member_factory self.image_repo = image_repo self.task_factory = task_factory self.task_repo = task_repo def get_image_factory(self, context): return self.image_factory def get_image_member_factory(self, context): return self.image_member_factory def get_repo(self, context): return self.image_repo def get_task_factory(self, context): return self.task_factory def get_task_repo(self, context): return self.task_repo class FakeTask(object): def __init__(self, task_id, type=None, status=None): self.task_id = task_id self.type = type self.message = None self.input = None self._status = status self._executor = None def success(self, result): self.result = result self._status = 'success' def fail(self, message): self.message = message self._status = 'failure' class FakeBaseDownloadPlugin(base_download.BaseDownload): def execute(self): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9143097 glance-29.0.0/glance/tests/unit/v2/0000775000175000017500000000000000000000000016757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/v2/__init__.py0000664000175000017500000000000000000000000021056 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 
glance-29.0.0/glance/tests/unit/v2/test_cache_management_api.py0000664000175000017500000001436300000000000024467 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from glance.api.v2 import cached_images from glance import notifier import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' class FakeImage(object): def __init__(self, id=None, status='active', container_format='ami', disk_format='ami', locations=None): self.id = id or UUID1 self.status = status self.container_format = container_format self.disk_format = disk_format self.locations = locations self.owner = unit_test_utils.TENANT1 self.created_at = '' self.updated_at = '' self.min_disk = '' self.min_ram = '' self.protected = False self.checksum = '' self.os_hash_algo = '' self.os_hash_value = '' self.size = 0 self.virtual_size = 0 self.visibility = 'public' self.os_hidden = False self.name = 'foo' self.tags = [] self.extra_properties = {} self.member = self.owner # NOTE(danms): This fixture looks more like the db object than # the proxy model. This needs fixing all through the tests # below. self.image_id = self.id class TestCacheManageAPI(test_utils.BaseTestCase): def setUp(self): super(TestCacheManageAPI, self).setUp() self.req = unit_test_utils.get_fake_request() def _main_test_helper(self, argv, status='active', image_mock=True): with mock.patch.object(notifier.ImageRepoProxy, 'get') as mock_get: image = FakeImage(status=status) mock_get.return_value = image with mock.patch.object(cached_images.CacheController, '_enforce') as e: with mock.patch('glance.image_cache.ImageCache') as ic: cc = cached_images.CacheController() cc.cache = ic c_calls = [] c_calls += argv[0].split(',') for call in c_calls: mock.patch.object(ic, call) test_call = getattr(cc, argv[1]) new_policy = argv[2] args = [] if len(argv) == 4: args = argv[3:] test_call(self.req, *args) if image_mock: e.assert_called_once_with(self.req, image=image, new_policy=new_policy) else: e.assert_called_once_with(self.req, new_policy=new_policy) mcs = [] for method in ic.method_calls: mcs.append(str(method)) for call in c_calls: if args == []: args.append("") elif args[0] and not args[0].endswith("'"): args[0] = "'" + args[0] + "'" self.assertIn("call." 
+ call + "(" + args[0] + ")", mcs) self.assertEqual(len(c_calls), len(mcs)) def test_delete_cache_entry(self): self._main_test_helper(['delete_cached_image,delete_queued_image', 'delete_cache_entry', 'cache_delete', UUID1]) def test_clear_cache(self): self._main_test_helper( ['delete_all_cached_images,delete_all_queued_images', 'clear_cache', 'cache_delete'], image_mock=False) def test_get_cache_state(self): self._main_test_helper(['get_cached_images,get_queued_images', 'get_cache_state', 'cache_list'], image_mock=False) @mock.patch.object(cached_images, 'WORKER') def test_queue_image_from_api(self, mock_worker): self._main_test_helper(['queue_image', 'queue_image_from_api', 'cache_image', UUID1]) mock_worker.submit.assert_called_once_with(UUID1) def test_init_no_config(self): # Make sure the worker was reset to uninitialized self.assertIsNone(cached_images.WORKER) self.config(image_cache_dir=None) cached_images.CacheController() # Make sure it is still None because image_cache_dir was not # set self.assertIsNone(cached_images.WORKER) def test_init_with_config(self): # Make sure the worker was reset to uninitialized self.assertIsNone(cached_images.WORKER) self.config(image_cache_dir='/tmp') cached_images.CacheController() # Make sure we initialized it because config told us to self.assertIsNotNone(cached_images.WORKER) self.assertTrue(cached_images.WORKER.is_alive()) cached_images.WORKER.terminate() class TestCacheWorker(test_utils.BaseTestCase): @mock.patch('glance.image_cache.prefetcher.Prefetcher') def test_worker_lifecycle(self, mock_pf): worker = cached_images.CacheWorker() self.assertFalse(worker.is_alive()) worker.start() self.assertTrue(worker.is_alive()) worker.submit('123') worker.submit('456') self.assertTrue(worker.is_alive()) worker.terminate() self.assertFalse(worker.is_alive()) mock_pf.return_value.fetch_image_into_cache.assert_has_calls([ mock.call('123'), mock.call('456')]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/v2/test_discovery_image_import.py0000664000175000017500000000334500000000000025140 0ustar00zuulzuul00000000000000# Copyright (c) 2017 RedHat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
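# Illustrative sketch of the queue-plus-sentinel worker pattern that
# TestCacheWorker above exercises. This is not glance's CacheWorker;
# the fetch_image_into_cache callable and all names are stand-ins.
import queue
import threading

class _SketchCacheWorker(object):
    EXIT_SENTINEL = object()

    def __init__(self, fetch_image_into_cache):
        self._fetch = fetch_image_into_cache
        self._queue = queue.Queue()
        self._thread = None

    def start(self):
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def submit(self, image_id):
        self._queue.put(image_id)

    def terminate(self):
        # The sentinel queues behind any pending work, so submitted
        # image ids are still processed before the thread exits.
        self._queue.put(self.EXIT_SENTINEL)
        self._thread.join()
        self._thread = None

    def is_alive(self):
        return self._thread is not None and self._thread.is_alive()

    def _run(self):
        while True:
            task = self._queue.get()
            if task is self.EXIT_SENTINEL:
                break
            self._fetch(task)

# Mirrors test_worker_lifecycle: not alive, start, submit, terminate.
fetched = []
worker = _SketchCacheWorker(fetched.append)
assert not worker.is_alive()
worker.start()
assert worker.is_alive()
worker.submit('123')
worker.submit('456')
worker.terminate()
assert not worker.is_alive()
assert fetched == ['123', '456']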
import glance.api.v2.discovery import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils class TestInfoControllers(test_utils.BaseTestCase): def setUp(self): super(TestInfoControllers, self).setUp() self.controller = glance.api.v2.discovery.InfoController() def test_get_import_info_with_empty_method_list(self): """When methods list is empty, should still return import methods""" self.config(enabled_import_methods=[]) req = unit_test_utils.get_fake_request() output = self.controller.get_image_import(req) self.assertIn('import-methods', output) self.assertEqual([], output['import-methods']['value']) def test_get_import_info(self): """Testing defaults, not all possible values""" default_import_methods = ['glance-direct', 'web-download', 'copy-image'] req = unit_test_utils.get_fake_request() output = self.controller.get_image_import(req) self.assertIn('import-methods', output) self.assertEqual(default_import_methods, output['import-methods']['value']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/v2/test_discovery_stores.py0000664000175000017500000001507200000000000024003 0ustar00zuulzuul00000000000000# Copyright (c) 2018-2019 RedHat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
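# Illustrative sketch of the response shape the import-discovery tests
# above assert on: a mapping keyed by 'import-methods' whose 'value' is
# the configured method list. A simplified stand-in built from the
# enabled_import_methods option, not glance's InfoController; the
# 'description' and 'type' keys are assumptions.
def sketch_image_import_info(enabled_import_methods):
    return {
        'import-methods': {
            'description': 'Import methods available.',
            'type': 'array',
            'value': list(enabled_import_methods),
        }
    }

info = sketch_image_import_info(
    ['glance-direct', 'web-download', 'copy-image'])
assert info['import-methods']['value'] == [
    'glance-direct', 'web-download', 'copy-image']
# An empty configuration still yields the key, with an empty list.
assert sketch_image_import_info([])['import-methods']['value'] == []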
import glance_store from oslo_config import cfg import webob.exc import glance.api.v2.discovery from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils CONF = cfg.CONF class TestInfoControllers(base.MultiStoreClearingUnitTest): def setUp(self): super(TestInfoControllers, self).setUp() self.controller = glance.api.v2.discovery.InfoController() def tearDown(self): super(TestInfoControllers, self).tearDown() def test_get_stores_with_enabled_backends_empty(self): self.config(enabled_backends={}) req = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.get_stores, req) def test_get_stores(self): available_stores = ['cheap', 'fast', 'readonly_store', 'fast-cinder', 'fast-rbd', 'reliable'] req = unit_test_utils.get_fake_request() output = self.controller.get_stores(req) self.assertIn('stores', output) for stores in output['stores']: self.assertIn('id', stores) self.assertNotIn('weight', stores) self.assertIn(stores['id'], available_stores) def test_get_stores_read_only_store(self): available_stores = ['cheap', 'fast', 'readonly_store', 'fast-cinder', 'fast-rbd', 'reliable'] req = unit_test_utils.get_fake_request() output = self.controller.get_stores(req) self.assertIn('stores', output) for stores in output['stores']: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) if stores['id'] == 'readonly_store': self.assertTrue(stores['read-only']) else: self.assertIsNone(stores.get('read-only')) def test_get_stores_reserved_stores_excluded(self): enabled_backends = { 'fast': 'file', 'cheap': 'file' } self.config(enabled_backends=enabled_backends) req = unit_test_utils.get_fake_request() output = self.controller.get_stores(req) self.assertIn('stores', output) self.assertEqual(2, len(output['stores'])) for stores in output["stores"]: self.assertFalse(stores["id"].startswith("os_glance_")) def test_get_stores_detail(self): available_stores = ['cheap', 'fast', 'readonly_store', 'fast-cinder', 'fast-rbd', 'reliable'] available_store_type = ['file', 'file', 'http', 'cinder', 'rbd', 'swift'] req = unit_test_utils.get_fake_request(roles=['admin']) output = self.controller.get_stores_detail(req) self.assertEqual(len(CONF.enabled_backends), len(output['stores'])) self.assertIn('stores', output) for stores in output['stores']: self.assertIn('id', stores) self.assertIn(stores['id'], available_stores) self.assertIn(stores['type'], available_store_type) self.assertIsNotNone(stores['properties']) def test_get_stores_detail_properties(self): store_attributes = {'rbd': ['chunk_size', 'pool', 'thin_provisioning'], 'file': ['data_dir', 'chunk_size', 'thin_provisioning'], 'cinder': ['volume_type', 'use_multipath'], 'swift': ['container', 'large_object_size', 'large_object_chunk_size'], 'http': []} req = unit_test_utils.get_fake_request(roles=['admin']) output = self.controller.get_stores_detail(req) self.assertEqual(len(CONF.enabled_backends), len(output['stores'])) self.assertIn('stores', output) for store in output['stores']: actual_attribute = list(store['properties'].keys()) expected_attribute = store_attributes[store['type']] self.assertEqual(actual_attribute, expected_attribute) def test_get_stores_detail_with_store_weight(self): self.config(weight=100, group='fast') self.config(weight=200, group='cheap') self.config(weight=300, group='fast-rbd') self.config(weight=400, group='fast-cinder') self.config(weight=500, group='reliable') req = unit_test_utils.get_fake_request(roles=['admin']) output = 
self.controller.get_stores_detail(req) self.assertEqual(len(CONF.enabled_backends), len(output['stores'])) self.assertIn('stores', output) for store in output['stores']: self.assertIn('weight', store) def test_get_stores_detail_non_admin(self): req = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get_stores_detail, req) def test_swift_multitenant_and_conf_file_enabled(self): self.config(enabled_backends={'fast-rbd': 'rbd', 'test': 'swift'}) glance_store.register_store_opts(CONF) self.config(default_backend='fast-rbd', group='glance_store') self.config(rbd_store_chunk_size=8688388, rbd_store_pool='images', rbd_thin_provisioning=False, group='fast-rbd') self.config(swift_store_container='glance', swift_store_large_object_size=524288000, swift_store_large_object_chunk_size=204800000, swift_store_config_file='fake-file.conf', swift_store_multi_tenant=True, group='test') glance_store.create_multi_stores(CONF) req = unit_test_utils.get_fake_request(roles=['admin']) output = self.controller.get_stores_detail(req) self.assertNotEqual(len(CONF.enabled_backends), len(output['stores'])) self.assertNotIn('test', [store.get('id') for store in output['stores']]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/v2/test_image_actions_resource.py0000664000175000017500000001452700000000000025112 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
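# Illustrative sketch of the /v2/info/stores response shape asserted
# above: reserved 'os_glance_*' stores are never advertised,
# 'read-only' appears only for read-only backends, and 'weight' is
# reserved for the admin-only detail view. A simplified stand-in, not
# glance's controller; treating 'http' as the read-only type is an
# assumption for illustration.
def sketch_stores_info(enabled_backends, read_only_types=('http',)):
    stores = []
    for store_id, store_type in enabled_backends.items():
        if store_id.startswith('os_glance_'):
            continue  # internal reserved stores stay hidden
        entry = {'id': store_id}
        if store_type in read_only_types:
            entry['read-only'] = True
        stores.append(entry)
    return {'stores': stores}

out = sketch_stores_info({'fast': 'file',
                          'readonly_store': 'http',
                          'os_glance_tasks_store': 'file'})
assert [s['id'] for s in out['stores']] == ['fast', 'readonly_store']
assert out['stores'][1]['read-only'] is True
assert 'weight' not in out['stores'][0]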
import glance_store as store import webob import glance.api.v2.image_actions as image_actions import glance.context from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils BASE_URI = unit_test_utils.BASE_URI USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'visibility': 'shared', 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'virtual_size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj class TestImageActionsController(base.IsolatedUnitTest): def setUp(self): super(TestImageActionsController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() for i in range(1, 4): self.store.data['%s/fake_location_%i' % (BASE_URI, i)] = ('Z', 1) self.store_utils = unit_test_utils.FakeStoreUtils(self.store) self.controller = image_actions.ImageActionsController( self.db, self.policy, self.notifier, self.store) self.controller.gateway.store_utils = self.store_utils store.create_stores() def _get_fake_context(self, user=USER1, tenant=TENANT1, roles=None, is_admin=False): if roles is None: roles = ['member'] kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': is_admin, } context = glance.context.RequestContext(**kwargs) return context def _create_image(self, status): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, name='1', size=256, virtual_size=1024, visibility='public', locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}], disk_format='raw', container_format='bare', status=status), ] context = self._get_fake_context() [self.db.image_create(context, image) for image in self.images] def test_deactivate_from_active(self): self._create_image('active') request = unit_test_utils.get_fake_request() self.controller.deactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('deactivated', image['status']) def test_deactivate_from_deactivated(self): self._create_image('deactivated') request = unit_test_utils.get_fake_request() self.controller.deactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('deactivated', image['status']) def _test_deactivate_from_wrong_status(self, status): # deactivate will yield an error if the initial status is anything # other than 'active' or 'deactivated' self._create_image(status) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.deactivate, request, UUID1) def test_deactivate_from_queued(self): self._test_deactivate_from_wrong_status('queued') def test_deactivate_from_saving(self): self._test_deactivate_from_wrong_status('saving') def test_deactivate_from_killed(self): self._test_deactivate_from_wrong_status('killed') def test_deactivate_from_pending_delete(self): self._test_deactivate_from_wrong_status('pending_delete') def test_deactivate_from_deleted(self): self._test_deactivate_from_wrong_status('deleted') def test_reactivate_from_active(self): self._create_image('active') request = unit_test_utils.get_fake_request() 
self.controller.reactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('active', image['status']) def test_reactivate_from_deactivated(self): self._create_image('deactivated') request = unit_test_utils.get_fake_request() self.controller.reactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('active', image['status']) def _test_reactivate_from_wrong_status(self, status): # reactivate will yield an error if the initial status is anything # other than 'active' or 'deactivated' self._create_image(status) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.reactivate, request, UUID1) def test_reactivate_from_queued(self): self._test_reactivate_from_wrong_status('queued') def test_reactivate_from_saving(self): self._test_reactivate_from_wrong_status('saving') def test_reactivate_from_killed(self): self._test_reactivate_from_wrong_status('killed') def test_reactivate_from_pending_delete(self): self._test_reactivate_from_wrong_status('pending_delete') def test_reactivate_from_deleted(self): self._test_reactivate_from_wrong_status('deleted') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/v2/test_image_data_resource.py0000664000175000017500000013676000000000000024367 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
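# Illustrative sketch of the status guard the deactivate/reactivate
# tests above encode: both actions are idempotent, are only legal from
# 'active' or 'deactivated', and any other starting status is rejected.
# A simplified stand-in operating on a plain dict, not glance's
# controller or its HTTP error types.
ALLOWED_FROM = ('active', 'deactivated')

def sketch_set_activation(image, target):
    assert target in ALLOWED_FROM
    if image['status'] == target:
        return image  # already there: a no-op
    if image['status'] not in ALLOWED_FROM:
        raise RuntimeError(
            'cannot reach %r from %r' % (target, image['status']))
    image['status'] = target
    return image

img = {'status': 'active'}
sketch_set_activation(img, 'deactivated')
assert img['status'] == 'deactivated'
sketch_set_activation(img, 'active')
assert img['status'] == 'active'
try:
    sketch_set_activation({'status': 'queued'}, 'deactivated')
except RuntimeError:
    pass
else:
    raise AssertionError('expected rejection from queued')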
import http.client as http import io from unittest import mock import uuid from cursive import exception as cursive_exception import glance_store from glance_store._drivers import filesystem from oslo_config import cfg import webob import glance.api.policy import glance.api.v2.image_data from glance.common import exception from glance.common import wsgi from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF CONF.import_opt('public_endpoint', 'glance.api.versions') class Raise(object): def __init__(self, exc): self.exc = exc def __call__(self, *args, **kwargs): raise self.exc class FakeImage(object): def __init__(self, image_id=None, data=None, checksum=None, size=0, virtual_size=0, locations=None, container_format='bear', disk_format='rawr', status=None, owner=None): self.image_id = image_id self.data = data self.checksum = checksum self.size = size self.virtual_size = virtual_size self.locations = locations self.container_format = container_format self.disk_format = disk_format self.owner = owner self._status = status self.extra_properties = {} @property def status(self): return self._status @status.setter def status(self, value): if isinstance(self._status, BaseException): raise self._status else: self._status = value def get_data(self, offset=0, chunk_size=None): if chunk_size: return self.data[offset:offset + chunk_size] return self.data[offset:] def set_data(self, data, size=None, backend=None, set_active=True): self.data = ''.join(data) self.size = size self.status = 'modified-by-fake' class FakeImageRepo(object): def __init__(self, result=None): self.result = result def get(self, image_id): if isinstance(self.result, BaseException): raise self.result else: return self.result def save(self, image, from_state=None): self.saved_image = image class FakeGateway(object): def __init__(self, db=None, store=None, notifier=None, policy=None, repo=None): self.db = db self.store = store self.notifier = notifier self.policy = policy self.repo = repo def get_repo(self, context): return self.repo class TestImagesController(base.StoreClearingUnitTest): def setUp(self): super(TestImagesController, self).setUp() self.config(debug=True) self.image_repo = FakeImageRepo() db = unit_test_utils.FakeDB() policy = unit_test_utils.FakePolicyEnforcer() notifier = unit_test_utils.FakeNotifier() store = unit_test_utils.FakeStoreAPI() self.controller = glance.api.v2.image_data.ImageDataController() self.controller.gateway = FakeGateway(db, store, notifier, policy, self.image_repo) # FIXME(abhishekk): Everything is fake in this test, so mocked the # image mutable_check, Later we need to fix these tests to use # some realistic data patcher = mock.patch('glance.api.v2.policy.check_is_image_mutable') patcher.start() self.addCleanup(patcher.stop) def test_download(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd', locations=[{'url': 'http://example.com/image', 'metadata': {}, 'status': 'active'}]) self.image_repo.result = image image = self.controller.download(request, unit_test_utils.UUID1) self.assertEqual('abcd', image.image_id) def test_download_deactivated(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd', status='deactivated', locations=[{'url': 'http://example.com/image', 'metadata': {}, 'status': 'active'}]) self.image_repo.result = image self.assertRaises(webob.exc.HTTPForbidden, self.controller.download, request, str(uuid.uuid4())) def 
test_download_no_location(self): # NOTE(mclaren): NoContent will be raised by the ResponseSerializer # That's tested below. request = unit_test_utils.get_fake_request(roles=['admin', 'member']) self.image_repo.result = FakeImage('abcd') image = self.controller.download(request, unit_test_utils.UUID2) self.assertEqual('abcd', image.image_id) def test_download_non_existent_image(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.NotFound() self.assertRaises(webob.exc.HTTPNotFound, self.controller.download, request, str(uuid.uuid4())) def test_download_forbidden(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.Forbidden() self.assertRaises(webob.exc.HTTPForbidden, self.controller.download, request, str(uuid.uuid4())) def test_download_ok_when_get_image_location_forbidden(self): class ImageLocations(object): def __len__(self): raise exception.Forbidden() request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd') self.image_repo.result = image image.locations = ImageLocations() image = self.controller.download(request, unit_test_utils.UUID1) self.assertEqual('abcd', image.image_id) def test_upload(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd', owner='tenant1') self.image_repo.result = image self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) self.assertEqual('YYYY', image.data) self.assertEqual(4, image.size) def test_upload_not_allowed_by_policy(self): request = unit_test_utils.get_fake_request() with mock.patch.object(self.controller.policy, 'enforce') as mock_enf: mock_enf.side_effect = webob.exc.HTTPForbidden() exc = self.assertRaises(webob.exc.HTTPNotFound, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) self.assertTrue(mock_enf.called) # Make sure we did not leak details of the original Forbidden # error into the NotFound returned to the client. self.assertEqual('The resource could not be found.', str(exc)) # Now reject the upload_image call, but allow get_image to ensure that # we properly see a Forbidden result. 
with mock.patch.object(self.controller.policy, 'enforce') as mock_enf: mock_enf.side_effect = [webob.exc.HTTPForbidden(), lambda *a: None] exc = self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) self.assertTrue(mock_enf.called) def test_upload_status(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd') self.image_repo.result = image insurance = {'called': False} def read_data(): insurance['called'] = True self.assertEqual('saving', self.image_repo.saved_image.status) yield 'YYYY' self.controller.upload(request, unit_test_utils.UUID2, read_data(), None) self.assertTrue(insurance['called']) self.assertEqual('modified-by-fake', self.image_repo.saved_image.status) def test_upload_no_size(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd') self.image_repo.result = image self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', None) self.assertEqual('YYYY', image.data) self.assertIsNone(image.size) @mock.patch.object(glance.api.policy.Enforcer, 'enforce') def test_upload_image_forbidden(self, mock_enforce): request = unit_test_utils.get_fake_request() image = FakeImage('abcd', owner='tenant1') self.image_repo.result = image mock_enforce.side_effect = [exception.Forbidden, lambda *a: None] self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload, request, unit_test_utils.UUID2, 'YYYY', 4) expected_call = [ mock.call(mock.ANY, 'upload_image', mock.ANY), mock.call(mock.ANY, 'get_image', mock.ANY) ] mock_enforce.assert_has_calls(expected_call) def test_upload_invalid(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd') image.status = ValueError() self.image_repo.result = image self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) def test_upload_with_expired_token(self): def side_effect(image, from_state=None): if from_state == 'saving': raise exception.NotAuthenticated() mocked_save = mock.Mock(side_effect=side_effect) mocked_delete = mock.Mock() request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd') image.delete = mocked_delete self.image_repo.result = image self.image_repo.save = mocked_save self.assertRaises(webob.exc.HTTPUnauthorized, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) self.assertEqual(3, mocked_save.call_count) mocked_delete.assert_called_once_with() @mock.patch('glance.common.trust_auth.TokenRefresher') def test_upload_with_token_refresh(self, mock_refresher): mock_refresher.return_value = mock.MagicMock() mocked_save = mock.Mock() mocked_save.side_effect = [lambda *a: None, exception.NotAuthenticated(), lambda *a: None] request = unit_test_utils.get_fake_request(roles=['admin', 'member']) request.environ['keystone.token_info'] = { 'token': { 'roles': [{'name': 'member'}] } } image = FakeImage('abcd', owner='tenant1') self.image_repo.result = image self.image_repo.save = mocked_save self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) self.assertEqual('YYYY', image.data) self.assertEqual(4, image.size) self.assertEqual(3, mocked_save.call_count) def test_upload_non_existent_image_during_save_initiates_deletion(self): def fake_save_not_found(self, from_state=None): raise exception.ImageNotFound() def fake_save_conflict(self, from_state=None): raise exception.Conflict() for fun in [fake_save_not_found, fake_save_conflict]: request = 
unit_test_utils.get_fake_request( roles=['admin', 'member']) image = FakeImage('abcd', locations=['http://example.com/image']) self.image_repo.result = image self.image_repo.save = fun image.delete = mock.Mock() self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) self.assertTrue(image.delete.called) def test_upload_non_existent_image_raises_image_not_found_exception(self): def fake_save(self, from_state=None): raise exception.ImageNotFound() def fake_delete(): raise exception.ImageNotFound() request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd', locations=['http://example.com/image']) self.image_repo.result = image self.image_repo.save = fake_save image.delete = fake_delete self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) def test_upload_non_existent_image_raises_store_not_found_exception(self): def fake_save(self, from_state=None): raise glance_store.NotFound() def fake_delete(): raise exception.ImageNotFound() request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('abcd', locations=['http://example.com/image']) self.image_repo.result = image self.image_repo.save = fake_save image.delete = fake_delete self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) def test_upload_non_existent_image_before_save(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.NotFound() self.assertRaises(webob.exc.HTTPNotFound, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) def test_upload_data_exists(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage() exc = exception.InvalidImageStatusTransition(cur_status='active', new_status='queued') image.set_data = Raise(exc) self.image_repo.result = image self.assertRaises(webob.exc.HTTPConflict, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) def test_upload_storage_full(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage() image.set_data = Raise(glance_store.StorageFull) self.image_repo.result = image self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.upload, request, unit_test_utils.UUID2, 'YYYYYYY', 7) def test_upload_signature_verification_fails(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage() image.set_data = Raise(cursive_exception.SignatureVerificationError) self.image_repo.result = image self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) self.assertEqual('queued', self.image_repo.saved_image.status) def test_image_size_limit_exceeded(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage() image.set_data = Raise(exception.ImageSizeLimitExceeded) self.image_repo.result = image self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.upload, request, unit_test_utils.UUID1, 'YYYYYYY', 7) def test_upload_storage_quota_full(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.StorageQuotaFull("message") self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.upload, request, unit_test_utils.UUID1, 'YYYYYYY', 7) def test_upload_storage_forbidden(self): request = unit_test_utils.get_fake_request( user=unit_test_utils.USER2, roles=['admin', 'member']) image = FakeImage() 
image.set_data = Raise(exception.Forbidden) self.image_repo.result = image self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload, request, unit_test_utils.UUID2, 'YY', 2) def test_upload_storage_internal_error(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.ServerError() self.assertRaises(exception.ServerError, self.controller.upload, request, unit_test_utils.UUID1, 'ABC', 3) def test_upload_storage_write_denied(self): request = unit_test_utils.get_fake_request( user=unit_test_utils.USER3, roles=['admin', 'member']) image = FakeImage() image.set_data = Raise(glance_store.StorageWriteDenied) self.image_repo.result = image self.assertRaises(webob.exc.HTTPServiceUnavailable, self.controller.upload, request, unit_test_utils.UUID2, 'YY', 2) def test_upload_storage_store_disabled(self): """Test that uploading an image file raises StoreDisabled exception""" request = unit_test_utils.get_fake_request( user=unit_test_utils.USER3, roles=['admin', 'member']) image = FakeImage() image.set_data = Raise(glance_store.StoreAddDisabled) self.image_repo.result = image self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, unit_test_utils.UUID2, 'YY', 2) def _test_upload_download_prepare_notification(self): request = unit_test_utils.get_fake_request() self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) output = self.controller.download(request, unit_test_utils.UUID2) output_log = self.notifier.get_logs() prepare_payload = output['meta'].copy() prepare_payload['checksum'] = None prepare_payload['size'] = None prepare_payload['virtual_size'] = None prepare_payload['location'] = None prepare_payload['status'] = 'queued' del prepare_payload['updated_at'] prepare_log = { 'notification_type': "INFO", 'event_type': "image.prepare", 'payload': prepare_payload, } self.assertEqual(3, len(output_log)) prepare_updated_at = output_log[0]['payload']['updated_at'] del output_log[0]['payload']['updated_at'] self.assertLessEqual(prepare_updated_at, output['meta']['updated_at']) self.assertEqual(prepare_log, output_log[0]) def _test_upload_download_upload_notification(self): request = unit_test_utils.get_fake_request() self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) output = self.controller.download(request, unit_test_utils.UUID2) output_log = self.notifier.get_logs() upload_payload = output['meta'].copy() upload_log = { 'notification_type': "INFO", 'event_type': "image.upload", 'payload': upload_payload, } self.assertEqual(3, len(output_log)) self.assertEqual(upload_log, output_log[1]) def _test_upload_download_activate_notification(self): request = unit_test_utils.get_fake_request() self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) output = self.controller.download(request, unit_test_utils.UUID2) output_log = self.notifier.get_logs() activate_payload = output['meta'].copy() activate_log = { 'notification_type': "INFO", 'event_type': "image.activate", 'payload': activate_payload, } self.assertEqual(3, len(output_log)) self.assertEqual(activate_log, output_log[2]) def test_restore_image_when_upload_failed(self): request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage('fake') image.set_data = Raise(glance_store.StorageWriteDenied) self.image_repo.result = image self.assertRaises(webob.exc.HTTPServiceUnavailable, self.controller.upload, request, unit_test_utils.UUID2, 'ZZZ', 3) self.assertEqual('queued', self.image_repo.saved_image.status) @mock.patch.object(filesystem.Store, 'add') 
def test_restore_image_when_staging_failed(self, mock_store_add): mock_store_add.side_effect = glance_store.StorageWriteDenied() request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image_id = str(uuid.uuid4()) image = FakeImage('fake') self.image_repo.result = image self.assertRaises(webob.exc.HTTPServiceUnavailable, self.controller.stage, request, image_id, 'YYYYYYY', 7) self.assertEqual('queued', self.image_repo.saved_image.status) def test_stage(self): image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(filesystem.Store, 'add') as mock_add: mock_add.return_value = ('foo://bar', 4, 'ident', {}) self.controller.stage(request, image_id, 'YYYY', 4) self.assertEqual('uploading', image.status) self.assertEqual(4, image.size) def test_image_already_on_staging(self): image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(filesystem.Store, 'add') as mock_store_add: mock_store_add.return_value = ('foo://bar', 4, 'ident', {}) self.controller.stage(request, image_id, 'YYYY', 4) self.assertEqual('uploading', image.status) mock_store_add.side_effect = glance_store.Duplicate() self.assertEqual(4, image.size) self.assertRaises(webob.exc.HTTPConflict, self.controller.stage, request, image_id, 'YYYY', 4) @mock.patch.object(glance_store.driver.Store, 'configure') def test_image_stage_raises_bad_store_uri(self, mock_store_configure): mock_store_configure.side_effect = AttributeError() image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) self.assertRaises(exception.BadStoreUri, self.controller.stage, request, image_id, 'YYYY', 4) @mock.patch.object(filesystem.Store, 'add') def test_image_stage_raises_storage_full(self, mock_store_add): mock_store_add.side_effect = glance_store.StorageFull() image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(self.controller, "_unstage"): self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.stage, request, image_id, 'YYYYYYY', 7) @mock.patch.object(filesystem.Store, 'add') def test_image_stage_raises_storage_quota_full(self, mock_store_add): mock_store_add.side_effect = exception.StorageQuotaFull("message") image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(self.controller, "_unstage"): self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.stage, request, image_id, 'YYYYYYY', 7) @mock.patch.object(filesystem.Store, 'add') def test_image_stage_raises_storage_write_denied(self, mock_store_add): mock_store_add.side_effect = glance_store.StorageWriteDenied() image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(self.controller, "_unstage"): self.assertRaises(webob.exc.HTTPServiceUnavailable, self.controller.stage, request, image_id, 'YYYYYYY', 7) def test_image_stage_raises_internal_error(self): image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request() self.image_repo.result = 
exception.ServerError() self.assertRaises(exception.ServerError, self.controller.stage, request, image_id, 'YYYYYYY', 7) def test_image_stage_non_existent_image(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.NotFound() self.assertRaises(webob.exc.HTTPNotFound, self.controller.stage, request, str(uuid.uuid4()), 'ABC', 3) @mock.patch.object(filesystem.Store, 'add') def test_image_stage_raises_image_size_exceeded(self, mock_store_add): mock_store_add.side_effect = exception.ImageSizeLimitExceeded() image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(self.controller, "_unstage"): self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.stage, request, image_id, 'YYYYYYY', 7) @mock.patch.object(filesystem.Store, 'add') def test_image_stage_invalid_image_transition(self, mock_store_add): image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(filesystem.Store, 'add') as mock_add: mock_add.return_value = ('foo://bar', 4, 'ident', {}) self.controller.stage(request, image_id, 'YYYY', 4) self.assertEqual('uploading', image.status) self.assertEqual(4, image.size) # try staging again mock_store_add.side_effect = exception.InvalidImageStatusTransition( cur_status='uploading', new_status='uploading') self.assertRaises(webob.exc.HTTPConflict, self.controller.stage, request, image_id, 'YYYY', 4) def _test_image_stage_records_host(self, expected_url): image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image with mock.patch.object(filesystem.Store, 'add') as mock_add: mock_add.return_value = ('foo://bar', 4, 'ident', {}) self.controller.stage(request, image_id, 'YYYY', 4) if expected_url is None: self.assertNotIn('os_glance_stage_host', image.extra_properties) else: self.assertEqual(expected_url, image.extra_properties['os_glance_stage_host']) def test_image_stage_records_host_unset(self): # Make sure we do not set a null staging host, if we are not configured # to support worker-to-worker communication. self._test_image_stage_records_host(None) def test_image_stage_records_host_public_endpoint(self): # Make sure we fall back to public_endpoint self.config(public_endpoint='http://lb.example.com') self._test_image_stage_records_host('http://lb.example.com') def test_image_stage_records_host_self_url(self): # Make sure worker_self_reference_url takes precedence self.config(worker_self_reference_url='http://worker1.example.com') self._test_image_stage_records_host('http://worker1.example.com') def test_image_stage_fail_does_not_set_host(self): # Make sure that if the store.add() fails, we do not claim to have the # image staged. 
self.config(public_endpoint='http://worker1.example.com') image_id = str(uuid.uuid4()) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) image = FakeImage(image_id=image_id) self.image_repo.result = image exc_cls = glance_store.exceptions.StorageFull with mock.patch.object(filesystem.Store, 'add', side_effect=exc_cls): self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.stage, request, image_id, 'YYYY', 4) self.assertNotIn('os_glance_stage_host', image.extra_properties) class TestImageDataDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestImageDataDeserializer, self).setUp() self.deserializer = glance.api.v2.image_data.RequestDeserializer() def test_upload(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' request.body = b'YYY' request.headers['Content-Length'] = 3 output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual(b'YYY', data.read()) expected = {'size': 3} self.assertEqual(expected, output) def test_upload_chunked(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' # If we use body_file, webob assumes we want to do a chunked upload, # ignoring the Content-Length header request.body_file = io.StringIO('YYY') output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual('YYY', data.read()) expected = {'size': None} self.assertEqual(expected, output) def test_upload_chunked_with_content_length(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' request.body_file = io.BytesIO(b'YYY') # The deserializer shouldn't care if the Content-Length is # set when the user is attempting to send chunked data. request.headers['Content-Length'] = 3 output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual(b'YYY', data.read()) expected = {'size': 3} self.assertEqual(expected, output) def test_upload_with_incorrect_content_length(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' # The deserializer shouldn't care if the Content-Length and # actual request body length differ. 
That job is left up # to the controller request.body = b'YYY' request.headers['Content-Length'] = 4 output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual(b'YYY', data.read()) expected = {'size': 4} self.assertEqual(expected, output) def test_upload_wrong_content_type(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/json' request.body = b'YYYYY' self.assertRaises(webob.exc.HTTPUnsupportedMediaType, self.deserializer.upload, request) request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-st' request.body = b'YYYYY' self.assertRaises(webob.exc.HTTPUnsupportedMediaType, self.deserializer.upload, request) def test_stage(self): req = unit_test_utils.get_fake_request(roles=['admin', 'member']) req.headers['Content-Type'] = 'application/octet-stream' req.headers['Content-Length'] = 4 req.body_file = io.BytesIO(b'YYYY') output = self.deserializer.stage(req) data = output.pop('data') self.assertEqual(b'YYYY', data.read()) def test_stage_without_glance_direct(self): self.config(enabled_import_methods=['web-download']) req = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.deserializer.stage, req) def test_stage_raises_invalid_content_type(self): # TODO(abhishekk): change this when import methods are # listed in the config file req = unit_test_utils.get_fake_request() req.headers['Content-Type'] = 'application/json' self.assertRaises(webob.exc.HTTPUnsupportedMediaType, self.deserializer.stage, req) class TestImageDataSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImageDataSerializer, self).setUp() self.serializer = glance.api.v2.image_data.ResponseSerializer() def test_download(self): request = wsgi.Request.blank('/') request.environ = {} response = webob.Response() response.request = request image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) self.serializer.download(response, image) self.assertEqual(b'ZZZ', response.body) self.assertEqual('3', response.headers['Content-Length']) self.assertNotIn('Content-MD5', response.headers) self.assertEqual('application/octet-stream', response.headers['Content-Type']) def test_range_requests_for_image_downloads(self): """ Test partial download 'Range' requests for images (random image access) """ def download_successful_Range(d_range): request = wsgi.Request.blank('/') request.environ = {} request.headers['Range'] = d_range response = webob.Response() response.request = request image = FakeImage(size=3, data=[b'X', b'Y', b'Z']) self.serializer.download(response, image) self.assertEqual(206, response.status_code) self.assertEqual('2', response.headers['Content-Length']) self.assertEqual('bytes 1-2/3', response.headers['Content-Range']) self.assertEqual(b'YZ', response.body) download_successful_Range('bytes=1-2') download_successful_Range('bytes=1-') download_successful_Range('bytes=1-3') download_successful_Range('bytes=-2') download_successful_Range('bytes=1-100') def full_image_download_w_range(d_range): request = wsgi.Request.blank('/') request.environ = {} request.headers['Range'] = d_range response = webob.Response() response.request = request image = FakeImage(size=3, data=[b'X', b'Y', b'Z']) self.serializer.download(response, image) self.assertEqual(206, response.status_code) self.assertEqual('3', response.headers['Content-Length']) self.assertEqual('bytes 0-2/3', response.headers['Content-Range']) self.assertEqual(b'XYZ', response.body) full_image_download_w_range('bytes=0-') 
        full_image_download_w_range('bytes=0-2')
        full_image_download_w_range('bytes=0-3')
        full_image_download_w_range('bytes=-3')
        full_image_download_w_range('bytes=-4')
        full_image_download_w_range('bytes=0-100')
        full_image_download_w_range('bytes=-100')

        def download_failures_Range(d_range):
            request = wsgi.Request.blank('/')
            request.environ = {}
            request.headers['Range'] = d_range
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=[b'Z', b'Z', b'Z'])
            self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable,
                              self.serializer.download,
                              response, image)
            return

        download_failures_Range('bytes=4-1')
        download_failures_Range('bytes=4-')
        download_failures_Range('bytes=3-')
        download_failures_Range('bytes=1')
        download_failures_Range('bytes=100')
        download_failures_Range('bytes=100-')
        download_failures_Range('bytes=')

    def test_multi_range_requests_raises_bad_request_error(self):
        request = wsgi.Request.blank('/')
        request.environ = {}
        request.headers['Range'] = 'bytes=0-0,-1'
        response = webob.Response()
        response.request = request
        image = FakeImage(size=3, data=[b'Z', b'Z', b'Z'])
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.serializer.download,
                          response, image)

    def test_download_failure_with_valid_range(self):
        with mock.patch.object(glance.domain.proxy.Image,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.NotFound(image="image")
            request = wsgi.Request.blank('/')
            request.environ = {}
            request.headers['Range'] = 'bytes=1-2'
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=[b'Z', b'Z', b'Z'])
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPNoContent,
                              self.serializer.download,
                              response, image)

    def test_content_range_requests_for_image_downloads(self):
        """
        Even though Content-Range is incorrect on requests, we support it
        for backward compatibility with clients written for pre-Pike
        Glance. The following tests cover 'Content-Range' requests, which
        we must keep supporting to prevent a regression.
        """
        def download_successful_ContentRange(d_range):
            request = wsgi.Request.blank('/')
            request.environ = {}
            request.headers['Content-Range'] = d_range
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=[b'X', b'Y', b'Z'])
            self.serializer.download(response, image)
            self.assertEqual(206, response.status_code)
            self.assertEqual('2', response.headers['Content-Length'])
            self.assertEqual('bytes 1-2/3',
                             response.headers['Content-Range'])
            self.assertEqual(b'YZ', response.body)

        download_successful_ContentRange('bytes 1-2/3')
        download_successful_ContentRange('bytes 1-2/*')

        def download_failures_ContentRange(d_range):
            request = wsgi.Request.blank('/')
            request.environ = {}
            request.headers['Content-Range'] = d_range
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=[b'Z', b'Z', b'Z'])
            self.assertRaises(webob.exc.HTTPRequestRangeNotSatisfiable,
                              self.serializer.download,
                              response, image)
            return

        download_failures_ContentRange('bytes -3/3')
        download_failures_ContentRange('bytes 1-/3')
        download_failures_ContentRange('bytes 1-3/3')
        download_failures_ContentRange('bytes 1-4/3')
        download_failures_ContentRange('bytes 1-4/*')
        download_failures_ContentRange('bytes 4-1/3')
        download_failures_ContentRange('bytes 4-1/*')
        download_failures_ContentRange('bytes 4-8/*')
        download_failures_ContentRange('bytes 4-8/10')
        download_failures_ContentRange('bytes 4-8/3')

    def test_download_failure_with_valid_content_range(self):
        with mock.patch.object(glance.domain.proxy.Image,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.NotFound(image="image")
            request = wsgi.Request.blank('/')
            request.environ = {}
            request.headers['Content-Range'] = 'bytes %s-%s/3' % (1, 2)
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=[b'Z', b'Z', b'Z'])
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPNoContent,
                              self.serializer.download,
                              response, image)

    def test_download_with_checksum(self):
        request = wsgi.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        checksum = '0745064918b49693cca64d6b6a13d28a'
        image = FakeImage(size=3, checksum=checksum,
                          data=[b'Z', b'Z', b'Z'])
        self.serializer.download(response, image)
        self.assertEqual(b'ZZZ', response.body)
        self.assertEqual('3', response.headers['Content-Length'])
        self.assertEqual(checksum, response.headers['Content-MD5'])
        self.assertEqual('application/octet-stream',
                         response.headers['Content-Type'])

    def test_download_forbidden(self):
        """Make sure the serializer can return a 403 forbidden error
        instead of a 500 internal server error.
        """
        def get_data(*args, **kwargs):
            raise exception.Forbidden()

        self.mock_object(glance.domain.proxy.Image,
                         'get_data',
                         get_data)
        request = wsgi.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        image = FakeImage(size=3, data=iter('ZZZ'))
        image.get_data = get_data
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.serializer.download,
                          response, image)

    def test_download_no_content(self):
        """Test image download returns HTTPNoContent.

        Make sure that the serializer returns a 204 no content error
        when image data is not available at the specified location.
        """
        with mock.patch.object(glance.domain.proxy.Image,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.NotFound(image="image")

            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPNoContent,
                              self.serializer.download,
                              response, image)

    def test_download_service_unavailable(self):
        """Test image download returns HTTPServiceUnavailable."""
        with mock.patch.object(glance.domain.proxy.Image,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = (
                glance_store.RemoteServiceUnavailable())

            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPServiceUnavailable,
                              self.serializer.download,
                              response, image)

    def test_download_store_get_not_support(self):
        """Test image download returns HTTPBadRequest.

        Make sure that the serializer returns a 400 bad request error
        when getting images from this store is not supported at the
        specified location.
        """
        with mock.patch.object(glance.domain.proxy.Image,
                               'get_data') as mock_get_data:
            mock_get_data.side_effect = glance_store.StoreGetNotSupported()

            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = mock_get_data
            self.assertRaises(webob.exc.HTTPBadRequest,
                              self.serializer.download,
                              response, image)

    def test_download_store_random_get_not_support(self):
        """Test image download returns HTTPBadRequest.

        Make sure that the serializer returns a 400 bad request error
        when random access to images in this store is not supported at
        the specified location.
        """
        with mock.patch.object(glance.domain.proxy.Image,
                               'get_data') as m_get_data:
            err = glance_store.StoreRandomGetNotSupported(offset=0,
                                                          chunk_size=0)
            m_get_data.side_effect = err
            request = wsgi.Request.blank('/')
            response = webob.Response()
            response.request = request
            image = FakeImage(size=3, data=iter('ZZZ'))
            image.get_data = m_get_data
            self.assertRaises(webob.exc.HTTPBadRequest,
                              self.serializer.download,
                              response, image)

    def test_upload(self):
        request = webob.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        self.serializer.upload(response, {})
        self.assertEqual(http.NO_CONTENT, response.status_int)
        self.assertEqual('0', response.headers['Content-Length'])

    def test_stage(self):
        request = webob.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        self.serializer.stage(response, {})
        self.assertEqual(http.NO_CONTENT, response.status_int)
        self.assertEqual('0', response.headers['Content-Length'])


class TestMultiBackendImagesController(base.MultiStoreClearingUnitTest):

    def setUp(self):
        super(TestMultiBackendImagesController, self).setUp()

        self.config(debug=True)
        self.image_repo = FakeImageRepo()
        db = unit_test_utils.FakeDB()
        policy = unit_test_utils.FakePolicyEnforcer()
        notifier = unit_test_utils.FakeNotifier()
        store = unit_test_utils.FakeStoreAPI()
        self.controller = glance.api.v2.image_data.ImageDataController()
        self.controller.gateway = FakeGateway(db, store, notifier,
                                              policy, self.image_repo)
        # FIXME(abhishekk): Everything is fake in this test, so we mock the
        # image mutability check. Later we need to fix these tests to use
        # some realistic data.
        patcher = mock.patch('glance.api.v2.policy.check_is_image_mutable')
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_upload(self):
        request = unit_test_utils.get_fake_request(roles=['admin', 'member'])
        image = FakeImage('abcd')
        self.image_repo.result = image
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        self.assertEqual('YYYY', image.data)
        self.assertEqual(4, image.size)

    def test_upload_invalid_backend_in_request_header(self):
        request = unit_test_utils.get_fake_request()
        request.headers['x-image-meta-store'] = 'dummy'
        image = FakeImage('abcd')
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'YYYY', 4)


glance-29.0.0/glance/tests/unit/v2/test_image_members_resource.py

# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime import http.client as http import glance_store from oslo_config import cfg from oslo_serialization import jsonutils import webob import glance.api.v2.image_members import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) ISOTIME = '2012-05-16T15:27:36Z' CONF = cfg.CONF BASE_URI = unit_test_utils.BASE_URI UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' UUID5 = '3eee7cc2-eae7-4c0f-b50d-a7160b0c62ed' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'visibility': 'shared', 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj def _db_image_member_fixture(image_id, member_id, **kwargs): obj = { 'image_id': image_id, 'member': member_id, 'status': 'pending', } obj.update(kwargs) return obj def _domain_fixture(id, **kwargs): properties = { 'id': id, } properties.update(kwargs) return glance.domain.ImageMembership(**properties) class TestImageMembersController(test_utils.BaseTestCase): def setUp(self): super(TestImageMembersController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.store = unit_test_utils.FakeStoreAPI() self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self._create_images() self._create_image_members() self.controller = glance.api.v2.image_members.ImageMembersController( self.db, self.policy, self.notifier, self.store) glance_store.register_opts(CONF) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores() def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, name='1', size=256, visibility='public', locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}]), _db_fixture(UUID2, owner=TENANT1, name='2', size=512), _db_fixture(UUID3, owner=TENANT3, name='3', size=512), _db_fixture(UUID4, owner=TENANT4, name='4', size=1024), _db_fixture(UUID5, owner=TENANT1, name='5', size=1024), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID2, TENANT4), _db_image_member_fixture(UUID3, TENANT4), _db_image_member_fixture(UUID3, TENANT2), _db_image_member_fixture(UUID4, TENANT1), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_index(self): request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT4]) self.assertEqual(expected, actual) def test_index_no_members(self): request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID5) self.assertEqual(0, len(output['members'])) self.assertEqual({'members': []}, output) def 
test_index_member_view(self): # UUID3 is a shared image owned by TENANT3 # UUID3 has members TENANT2 and TENANT4 # When TENANT4 lists members for UUID3, should not see TENANT2 request = unit_test_utils.get_fake_request(tenant=TENANT4) output = self.controller.index(request, UUID3) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT4]) self.assertEqual(expected, actual) def test_index_private_image(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, request, UUID5) def test_index_public_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request, UUID1) def test_index_private_image_visible_members_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, UUID4) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT1]) self.assertEqual(expected, actual) def test_index_allowed_by_get_members_policy(self): rules = {"get_members": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(1, len(output['members'])) def test_index_forbidden_by_get_members_policy(self): rules = {"get_members": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request, image_id=UUID2) def test_show(self): request = unit_test_utils.get_fake_request(tenant=TENANT1) output = self.controller.show(request, UUID2, TENANT4) expected = self.image_members[0] self.assertEqual(expected['image_id'], output.image_id) self.assertEqual(expected['member'], output.member_id) self.assertEqual(expected['status'], output.status) def test_show_by_member(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) output = self.controller.show(request, UUID2, TENANT4) expected = self.image_members[0] self.assertEqual(expected['image_id'], output.image_id) self.assertEqual(expected['member'], output.member_id) self.assertEqual(expected['status'], output.status) def test_show_forbidden(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID2, TENANT4) def test_show_not_found(self): # one member should not be able to view status of another member # of the same image request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID3, TENANT4) def test_create(self): request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 output = self.controller.create(request, image_id=image_id, member_id=member_id) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) def test_create_allowed_by_add_policy(self): rules = {"add_member": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() output = self.controller.create(request, image_id=UUID2, member_id=TENANT3) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) def test_create_forbidden_by_add_policy(self): rules = {"add_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, 
self.controller.create, request, image_id=UUID2, member_id=TENANT3) def test_create_duplicate_member(self): request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 output = self.controller.create(request, image_id=image_id, member_id=member_id) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, request, image_id=image_id, member_id=member_id) def test_create_overlimit(self): self.config(image_member_quota=0) request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, request, image_id=image_id, member_id=member_id) def test_create_unlimited(self): self.config(image_member_quota=-1) request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 output = self.controller.create(request, image_id=image_id, member_id=member_id) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) def test_member_create_raises_bad_request_for_unicode_value(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image_id=UUID5, member_id='\U0001f693') def test_update_done_by_member(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) image_id = UUID2 member_id = TENANT4 output = self.controller.update(request, image_id=image_id, member_id=member_id, status='accepted') self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT4, output.member_id) self.assertEqual('accepted', output.status) def test_update_done_by_member_forbidden_by_policy(self): rules = {"modify_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT4) self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, image_id=UUID2, member_id=TENANT4, status='accepted') def test_update_done_by_member_allowed_by_policy(self): rules = {"modify_member": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT4) output = self.controller.update(request, image_id=UUID2, member_id=TENANT4, status='accepted') self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT4, output.member_id) self.assertEqual('accepted', output.status) def test_update_done_by_owner(self): enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "'{0}':%(owner)s".format(TENANT1) }) self.controller.policy = enforcer request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID2, TENANT4, status='accepted') def test_update_non_existent_image(self): request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, request, '123', TENANT4, status='accepted') def test_update_invalid_status(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID2, TENANT4, status='accept') def test_create_private_image(self): enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", }) self.controller.policy = enforcer request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, UUID4, TENANT2) def test_create_public_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, 
self.controller.create, request, UUID1, TENANT2) def test_create_image_does_not_exist(self): request = unit_test_utils.get_fake_request() image_id = 'fake-image-id' member_id = TENANT3 self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, request, image_id=image_id, member_id=member_id) def test_delete(self): request = unit_test_utils.get_fake_request() member_id = TENANT4 image_id = UUID2 res = self.controller.delete(request, image_id, member_id) self.assertEqual(b'', res.body) self.assertEqual(http.NO_CONTENT, res.status_code) found_member = self.db.image_member_find( request.context, image_id=image_id, member=member_id) self.assertEqual([], found_member) def test_delete_by_member(self): enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "delete_member": "'{0}':%(owner)s".format(TENANT4), "get_members": "", "get_member": "" }) request = unit_test_utils.get_fake_request(tenant=TENANT4) self.controller.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID2, TENANT4) request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT4]) self.assertEqual(expected, actual) def test_delete_allowed_by_policies(self): rules = {"get_member": True, "delete_member": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT1) output = self.controller.delete(request, image_id=UUID2, member_id=TENANT4) request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(0, len(output['members'])) def test_delete_forbidden_by_get_member_policy(self): rules = {"get_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID2, TENANT4) def test_delete_forbidden_by_delete_member_policy(self): rules = {"delete_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID2, TENANT4) def test_delete_private_image(self): enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "delete_member": "'{0}':%(owner)s".format(TENANT1), "get_member": "" }) self.controller.policy = enforcer request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID4, TENANT1) def test_delete_public_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID1, TENANT1) def test_delete_image_does_not_exist(self): request = unit_test_utils.get_fake_request() member_id = TENANT2 image_id = 'fake-image-id' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, image_id, member_id) def test_delete_member_does_not_exist(self): request = unit_test_utils.get_fake_request() member_id = 'fake-member-id' image_id = UUID2 found_member = self.db.image_member_find( request.context, image_id=image_id, member=member_id) self.assertEqual([], found_member) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, image_id, member_id) class TestImageMembersSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImageMembersSerializer, self).setUp() self.serializer = 
glance.api.v2.image_members.ResponseSerializer() self.fixtures = [ _domain_fixture(id='1', image_id=UUID2, member_id=TENANT1, status='accepted', created_at=DATETIME, updated_at=DATETIME), _domain_fixture(id='2', image_id=UUID2, member_id=TENANT2, status='pending', created_at=DATETIME, updated_at=DATETIME), ] def test_index(self): expected = { 'members': [ { 'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'schema': '/v2/schemas/member', }, { 'image_id': UUID2, 'member_id': TENANT2, 'status': 'pending', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'schema': '/v2/schemas/member', }, ], 'schema': '/v2/schemas/members', } request = webob.Request.blank('/v2/images/%s/members' % UUID2) response = webob.Response(request=request) result = {'members': self.fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_show(self): expected = { 'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'schema': '/v2/schemas/member', } request = webob.Request.blank('/v2/images/%s/members/%s' % (UUID2, TENANT1)) response = webob.Response(request=request) result = self.fixtures[0] self.serializer.show(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_create(self): expected = {'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'schema': '/v2/schemas/member', 'created_at': ISOTIME, 'updated_at': ISOTIME} request = webob.Request.blank('/v2/images/%s/members/%s' % (UUID2, TENANT1)) response = webob.Response(request=request) result = self.fixtures[0] self.serializer.create(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_update(self): expected = {'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'schema': '/v2/schemas/member', 'created_at': ISOTIME, 'updated_at': ISOTIME} request = webob.Request.blank('/v2/images/%s/members/%s' % (UUID2, TENANT1)) response = webob.Response(request=request) result = self.fixtures[0] self.serializer.update(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) class TestImagesDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializer, self).setUp() self.deserializer = glance.api.v2.image_members.RequestDeserializer() def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'member': TENANT1}) output = self.deserializer.create(request) expected = {'member_id': TENANT1} self.assertEqual(expected, output) def test_create_invalid(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'mem': TENANT1}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_member_empty(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'member': ''}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_list_return_error(self): request 
= unit_test_utils.get_fake_request()
        request.body = jsonutils.dump_as_bytes([TENANT1])
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.create, request)

    def test_update_list_return_error(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dump_as_bytes([TENANT1])
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.update, request)

    def test_update(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dump_as_bytes({'status': 'accepted'})
        output = self.deserializer.update(request)
        expected = {'status': 'accepted'}
        self.assertEqual(expected, output)

    def test_update_invalid(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dump_as_bytes({'mem': TENANT1})
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.update, request)

    def test_update_no_body(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.update, request)


glance-29.0.0/glance/tests/unit/v2/test_image_tags_resource.py

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
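# NOTE(editor): The tag tests below exercise the tags Controller against a
# fake DB. On the wire, tags are managed with bare PUT/DELETE requests
# against the tag URL itself, and both operations return 204 No Content, as
# the serializer tests at the end of this module assert. The helper below
# is an illustrative sketch only and is never called by the tests; endpoint
# and token values are placeholders, not part of the test suite.
def _sketch_tag_image(image_id, tag):
    import http.client as http_client

    conn = http_client.HTTPConnection('glance.example.com', 9292)
    headers = {'X-Auth-Token': '<token>'}
    # Adding a tag is idempotent: repeating the PUT for an existing tag is
    # simply ignored (see test_create_duplicate_tag_ignored below).
    conn.request('PUT', '/v2/images/%s/tags/%s' % (image_id, tag),
                 headers=headers)
    resp = conn.getresponse()
    resp.read()
    created = resp.status == 204
    conn.request('DELETE', '/v2/images/%s/tags/%s' % (image_id, tag),
                 headers=headers)
    resp = conn.getresponse()
    resp.read()
    deleted = resp.status == 204
    return created and deleted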
import http.client as http import webob import glance.api.v2.image_tags from glance.common import exception from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.unit.v2.test_image_data_resource as image_data_tests import glance.tests.utils as test_utils class TestImageTagsController(base.IsolatedUnitTest): def setUp(self): super(TestImageTagsController, self).setUp() self.db = unit_test_utils.FakeDB() self.controller = glance.api.v2.image_tags.Controller(self.db) def test_create_tag(self): request = unit_test_utils.get_fake_request() self.controller.update(request, unit_test_utils.UUID1, 'dink') context = request.context tags = self.db.image_tag_get_all(context, unit_test_utils.UUID1) self.assertEqual(1, len([tag for tag in tags if tag == 'dink'])) def test_create_too_many_tags(self): self.config(image_tag_quota=0) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, unit_test_utils.UUID1, 'dink') def test_create_duplicate_tag_ignored(self): request = unit_test_utils.get_fake_request() self.controller.update(request, unit_test_utils.UUID1, 'dink') self.controller.update(request, unit_test_utils.UUID1, 'dink') context = request.context tags = self.db.image_tag_get_all(context, unit_test_utils.UUID1) self.assertEqual(1, len([tag for tag in tags if tag == 'dink'])) def test_update_tag_of_non_existing_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, request, "abcd", "dink") def test_delete_tag_forbidden(self): def fake_get(self): raise exception.Forbidden() image_repo = image_data_tests.FakeImageRepo() image_repo.get = fake_get def get_fake_repo(self): return image_repo self.controller.gateway.get_repo = get_fake_repo request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, unit_test_utils.UUID1, "ping") def test_delete_tag(self): request = unit_test_utils.get_fake_request() self.controller.delete(request, unit_test_utils.UUID1, 'ping') def test_delete_tag_not_found(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, unit_test_utils.UUID1, 'what') def test_delete_tag_of_non_existing_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, "abcd", "dink") class TestImagesSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializer, self).setUp() self.serializer = glance.api.v2.image_tags.ResponseSerializer() def test_create_tag(self): response = webob.Response() self.serializer.update(response, None) self.assertEqual(http.NO_CONTENT, response.status_int) def test_delete_tag(self): response = webob.Response() self.serializer.delete(response, None) self.assertEqual(http.NO_CONTENT, response.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/unit/v2/test_images_resource.py0000664000175000017500000112516100000000000023553 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import hashlib import http.client as http import os import requests from unittest import mock import uuid from castellan.common import exception as castellan_exception import glance_store as store from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import fixture import testtools import webob import webob.exc import glance.api.v2.image_actions import glance.api.v2.images from glance.common import exception from glance.common import store_utils from glance.common import timeutils from glance import domain import glance.notifier import glance.schema from glance.tests.unit import base from glance.tests.unit.keymgr import fake as fake_keymgr import glance.tests.unit.utils as unit_test_utils from glance.tests.unit.v2 import test_tasks_resource import glance.tests.utils as test_utils CONF = cfg.CONF DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) ISOTIME = '2012-05-16T15:27:36Z' BASE_URI = unit_test_utils.BASE_URI UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' UUID5 = '13c58ac4-210d-41ab-8cdb-1adfe4610019' UUID6 = '6d33fd0f-2438-4419-acd0-ce1d452c97a0' UUID7 = '75ddbc84-9427-4f3b-8d7d-b0fd0543d9a8' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' CHKSUM1 = '43254c3edf6972c9f1cb309543d38a8c' FAKEHASHALGO = 'fake-name-for-sha512' MULTIHASH1 = hashlib.sha512(b'glance').hexdigest() MULTIHASH2 = hashlib.sha512(b'image_service').hexdigest() TASK_ID_1 = 'b3006bd0-461e-4228-88ea-431c14e918b4' TASK_ID_2 = '07b6b562-6770-4c8b-a649-37a515144ce9' TASK_ID_3 = '72d16bb6-4d70-48a5-83fe-14bb842dc737' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'visibility': 'shared', 'properties': {}, 'checksum': None, 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'virtual_size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj def _db_task_fixtures(task_id, **kwargs): default_datetime = timeutils.utcnow() obj = { 'id': task_id, 'status': kwargs.get('status', 'pending'), 'type': 'import', 'input': kwargs.get('input', {}), 'result': None, 'owner': None, 'image_id': kwargs.get('image_id'), 'user_id': kwargs.get('user_id'), 'request_id': kwargs.get('request_id'), 'message': None, 'expires_at': default_datetime + datetime.timedelta(days=365), 'created_at': default_datetime, 'updated_at': default_datetime, 'deleted_at': None, 'deleted': False } obj.update(kwargs) return obj def _domain_fixture(id, **kwargs): properties = { 'image_id': id, 'name': None, 'visibility': 'private', 'checksum': None, 'os_hash_algo': None, 'os_hash_value': None, 'owner': None, 'status': 'queued', 'size': None, 'virtual_size': None, 'locations': [], 'protected': 
False, 'disk_format': None, 'container_format': None, 'min_ram': None, 'min_disk': None, 'tags': [], } properties.update(kwargs) return glance.domain.Image(**properties) def _db_image_member_fixture(image_id, member_id, **kwargs): obj = { 'image_id': image_id, 'member': member_id, } obj.update(kwargs) return obj class FakeImage(object): def __init__(self, id=None, status='active', container_format='ami', disk_format='ami', locations=None): self.id = id or UUID4 self.status = status self.container_format = container_format self.disk_format = disk_format self.locations = locations self.owner = unit_test_utils.TENANT1 self.created_at = '' self.updated_at = '' self.min_disk = '' self.min_ram = '' self.protected = False self.checksum = '' self.os_hash_algo = '' self.os_hash_value = '' self.size = 0 self.virtual_size = 0 self.visibility = 'public' self.os_hidden = False self.name = 'foo' self.tags = [] self.extra_properties = {} self.member = self.owner # NOTE(danms): This fixture looks more like the db object than # the proxy model. This needs fixing all through the tests # below. self.image_id = self.id class TestImagesController(base.IsolatedUnitTest): def setUp(self): super(TestImagesController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() for i in range(1, 4): self.store.data['%s/fake_location_%i' % (BASE_URI, i)] = ('Z', 1) self.store_utils = unit_test_utils.FakeStoreUtils(self.store) self._create_images() self._create_image_members() self.controller = glance.api.v2.images.ImagesController(self.db, self.policy, self.notifier, self.store) self.action_controller = (glance.api.v2.image_actions. 
ImageActionsController(self.db, self.policy, self.notifier, self.store)) self.controller.gateway.store_utils = self.store_utils self.controller._key_manager = fake_keymgr.fake_api() store.create_stores() def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, name='1', size=256, virtual_size=1024, visibility='public', locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}], disk_format='raw', container_format='bare', status='active', created_at=DATETIME, updated_at=DATETIME), _db_fixture(UUID2, owner=TENANT1, checksum=CHKSUM1, os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH2, name='2', size=512, virtual_size=2048, visibility='public', disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}, created_at=DATETIME + datetime.timedelta(seconds=1), updated_at=DATETIME + datetime.timedelta(seconds=1)), _db_fixture(UUID3, owner=TENANT3, checksum=CHKSUM1, os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH2, name='3', size=512, virtual_size=2048, visibility='public', tags=['windows', '64bit', 'x86'], created_at=DATETIME + datetime.timedelta(seconds=2), updated_at=DATETIME + datetime.timedelta(seconds=2)), _db_fixture(UUID4, owner=TENANT4, name='4', size=1024, virtual_size=3072, created_at=DATETIME + datetime.timedelta(seconds=3), updated_at=DATETIME + datetime.timedelta(seconds=3)), ] [self.db.image_create(None, image) for image in self.images] # Create tasks associated with image self.tasks = [ _db_task_fixtures( TASK_ID_1, image_id=UUID1, status='completed', input={ "image_id": UUID1, "import_req": { "method": { "name": "glance-direct" }, "backend": ["fake-store"] }, }, user_id='fake-user-id', request_id='fake-request-id', ), _db_task_fixtures( TASK_ID_2, image_id=UUID1, status='completed', input={ "image_id": UUID1, "import_req": { "method": { "name": "copy-image" }, "all_stores": True, "all_stores_must_succeed": False, "backend": ["fake-store", "fake_store_1"] }, }, user_id='fake-user-id', request_id='fake-request-id', ), _db_task_fixtures( TASK_ID_3, status='completed', input={ "image_id": UUID2, "import_req": { "method": { "name": "glance-direct" }, "backend": ["fake-store"] }, }, ), ] [self.db.task_create(None, task) for task in self.tasks] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID4, TENANT2), _db_image_member_fixture(UUID4, TENANT3, status='accepted'), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_index(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID3]) self.assertEqual(expected, actual) def test_index_member_status_accepted(self): self.config(limit_param_default=5, api_limit_max=5) request = unit_test_utils.get_fake_request(tenant=TENANT2) output = self.controller.index(request) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3]) # can see only the public image self.assertEqual(expected, actual) request = unit_test_utils.get_fake_request(tenant=TENANT3) output = self.controller.index(request) 
self.assertEqual(4, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3, UUID4]) self.assertEqual(expected, actual) def test_index_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request) self.assertEqual(4, len(output['images'])) def test_index_admin_deleted_images_hidden(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) output = self.controller.index(request) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3, UUID4]) self.assertEqual(expected, actual) def test_index_return_parameters(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request, marker=UUID3, limit=1, sort_key=['created_at'], sort_dir=['desc']) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2]) self.assertEqual(actual, expected) self.assertEqual(UUID2, output['next_marker']) def test_index_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request, marker=UUID3, limit=2) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID1]) self.assertEqual(expected, actual) self.assertEqual(UUID1, output['next_marker']) def test_index_no_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request, marker=UUID1, limit=2) self.assertEqual(0, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([]) self.assertEqual(expected, actual) self.assertNotIn('next_marker', output) def test_index_marker_would_be_disallowed(self): self.config(limit_param_default=1, api_limit_max=10) request = unit_test_utils.get_fake_request(is_admin=True) def fake_enforce(context, action, target=None, **kw): assert target is not None if target['project_id'] != TENANT1: raise exception.Forbidden() # As admin, list three images. By default, this should leave # us on UUID3 (and as next_marker), which is owned by TENANT3 output = self.controller.index(request, sort_dir=['asc'], limit=3) self.assertEqual(UUID3, output['next_marker']) self.assertEqual(3, len(output['images'])) # Now sub in our fake policy that restricts us to TENANT1 images only. # Even though we list with limit=3, we should only get two images back, # and our next_marker should be UUID2 because we couldn't see UUID3. 
with mock.patch.object(self.controller.policy, 'enforce', new=fake_enforce): output = self.controller.index(request, sort_dir=['asc'], limit=3) self.assertEqual(UUID2, output['next_marker']) self.assertEqual(2, len(output['images'])) def test_index_with_id_filter(self): request = unit_test_utils.get_fake_request('/images?id=%s' % UUID1) output = self.controller.index(request, filters={'id': UUID1}) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_with_invalid_hidden_filter(self): request = unit_test_utils.get_fake_request('/images?os_hidden=abcd') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, filters={'os_hidden': 'abcd'}) def test_index_with_checksum_filter_single_image(self): req = unit_test_utils.get_fake_request('/images?checksum=%s' % CHKSUM) output = self.controller.index(req, filters={'checksum': CHKSUM}) self.assertEqual(1, len(output['images'])) actual = list([image.image_id for image in output['images']]) expected = [UUID1] self.assertEqual(expected, actual) def test_index_with_checksum_filter_multiple_images(self): req = unit_test_utils.get_fake_request('/images?checksum=%s' % CHKSUM1) output = self.controller.index(req, filters={'checksum': CHKSUM1}) self.assertEqual(2, len(output['images'])) actual = list([image.image_id for image in output['images']]) expected = [UUID3, UUID2] self.assertEqual(expected, actual) def test_index_with_non_existent_checksum(self): req = unit_test_utils.get_fake_request('/images?checksum=236231827') output = self.controller.index(req, filters={'checksum': '236231827'}) self.assertEqual(0, len(output['images'])) def test_index_with_os_hash_value_filter_single_image(self): req = unit_test_utils.get_fake_request( '/images?os_hash_value=%s' % MULTIHASH1) output = self.controller.index(req, filters={'os_hash_value': MULTIHASH1}) self.assertEqual(1, len(output['images'])) actual = list([image.image_id for image in output['images']]) expected = [UUID1] self.assertEqual(expected, actual) def test_index_with_os_hash_value_filter_multiple_images(self): req = unit_test_utils.get_fake_request( '/images?os_hash_value=%s' % MULTIHASH2) output = self.controller.index(req, filters={'os_hash_value': MULTIHASH2}) self.assertEqual(2, len(output['images'])) actual = list([image.image_id for image in output['images']]) expected = [UUID3, UUID2] self.assertEqual(expected, actual) def test_index_with_non_existent_os_hash_value(self): fake_hash_value = hashlib.sha512(b'not_used_in_fixtures').hexdigest() req = unit_test_utils.get_fake_request( '/images?os_hash_value=%s' % fake_hash_value) output = self.controller.index(req, filters={'checksum': fake_hash_value}) self.assertEqual(0, len(output['images'])) def test_index_size_max_filter(self): request = unit_test_utils.get_fake_request('/images?size_max=512') output = self.controller.index(request, filters={'size_max': 512}) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_size_min_filter(self): request = unit_test_utils.get_fake_request('/images?size_min=512') output = self.controller.index(request, filters={'size_min': 512}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def 
test_index_size_range_filter(self): path = '/images?size_min=512&size_max=512' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'size_min': 512, 'size_max': 512}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_virtual_size_max_filter(self): ref = '/images?virtual_size_max=2048' request = unit_test_utils.get_fake_request(ref) output = self.controller.index(request, filters={'virtual_size_max': 2048}) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_virtual_size_min_filter(self): ref = '/images?virtual_size_min=2048' request = unit_test_utils.get_fake_request(ref) output = self.controller.index(request, filters={'virtual_size_min': 2048}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_virtual_size_range_filter(self): path = '/images?virtual_size_min=512&virtual_size_max=2048' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'virtual_size_min': 2048, 'virtual_size_max': 2048}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_with_invalid_max_range_filter_value(self): request = unit_test_utils.get_fake_request('/images?size_max=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, filters={'size_max': 'blah'}) def test_index_with_filters_return_many(self): path = '/images?status=queued' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'status': 'queued'}) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID3]) self.assertEqual(expected, actual) def test_index_with_nonexistent_name_filter(self): request = unit_test_utils.get_fake_request('/images?name=%s' % 'blah') images = self.controller.index(request, filters={'name': 'blah'})['images'] self.assertEqual(0, len(images)) def test_index_with_non_default_is_public_filter(self): private_uuid = str(uuid.uuid4()) new_image = _db_fixture(private_uuid, visibility='private', owner=TENANT3) self.db.image_create(None, new_image) path = '/images?visibility=private' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, filters={'visibility': 'private'}) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([private_uuid]) self.assertEqual(expected, actual) path = '/images?visibility=shared' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, filters={'visibility': 'shared'}) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID4]) self.assertEqual(expected, actual) def test_index_with_many_filters(self): url = '/images?status=queued&name=3' request = unit_test_utils.get_fake_request(url) output = self.controller.index(request, filters={ 'status': 'queued', 'name': '3', }) self.assertEqual(1, len(output['images'])) actual = 
set([image.image_id for image in output['images']]) expected = set([UUID3]) self.assertEqual(expected, actual) def test_index_with_marker(self): self.config(limit_param_default=1, api_limit_max=3) path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, marker=UUID3) actual = set([image.image_id for image in output['images']]) self.assertEqual(1, len(actual)) self.assertIn(UUID2, actual) def test_index_with_limit(self): path = '/images' limit = 2 request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, limit=limit) actual = set([image.image_id for image in output['images']]) self.assertEqual(limit, len(actual)) self.assertIn(UUID3, actual) self.assertIn(UUID2, actual) def test_index_greater_than_limit_max(self): self.config(limit_param_default=1, api_limit_max=3) path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, limit=4) actual = set([image.image_id for image in output['images']]) self.assertEqual(3, len(actual)) self.assertNotIn(output['next_marker'], output) def test_index_default_limit(self): self.config(limit_param_default=1, api_limit_max=3) path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request) actual = set([image.image_id for image in output['images']]) self.assertEqual(1, len(actual)) def test_index_with_sort_dir(self): path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, sort_dir=['asc'], limit=3) actual = [image.image_id for image in output['images']] self.assertEqual(3, len(actual)) self.assertEqual(UUID1, actual[0]) self.assertEqual(UUID2, actual[1]) self.assertEqual(UUID3, actual[2]) def test_index_with_sort_key(self): path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, sort_key=['created_at'], limit=3) actual = [image.image_id for image in output['images']] self.assertEqual(3, len(actual)) self.assertEqual(UUID3, actual[0]) self.assertEqual(UUID2, actual[1]) self.assertEqual(UUID1, actual[2]) def test_index_with_multiple_sort_keys(self): path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, sort_key=['created_at', 'name'], limit=3) actual = [image.image_id for image in output['images']] self.assertEqual(3, len(actual)) self.assertEqual(UUID3, actual[0]) self.assertEqual(UUID2, actual[1]) self.assertEqual(UUID1, actual[2]) def test_index_with_marker_not_found(self): fake_uuid = str(uuid.uuid4()) path = '/images' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=fake_uuid) def test_index_invalid_sort_key(self): path = '/images' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, sort_key=['foo']) def test_index_zero_images(self): self.db.reset() request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual([], output['images']) def test_index_with_tags(self): path = '/images?tag=64bit' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['64bit']}) actual = [image.tags for image in output['images']] self.assertEqual(2, len(actual)) self.assertIn('64bit', actual[0]) self.assertIn('64bit', actual[1]) def test_index_with_multi_tags(self): path = '/images?tag=power&tag=64bit' request = 
unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['power', '64bit']}) actual = [image.tags for image in output['images']] self.assertEqual(1, len(actual)) self.assertIn('64bit', actual[0]) self.assertIn('power', actual[0]) def test_index_with_multi_tags_and_nonexistent(self): path = '/images?tag=power&tag=fake' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['power', 'fake']}) actual = [image.tags for image in output['images']] self.assertEqual(0, len(actual)) def test_index_with_tags_and_properties(self): path = '/images?tag=64bit&hypervisor_type=kvm' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['64bit'], 'hypervisor_type': 'kvm'}) tags = [image.tags for image in output['images']] properties = [image.extra_properties for image in output['images']] self.assertEqual(len(tags), len(properties)) self.assertIn('64bit', tags[0]) self.assertEqual('kvm', properties[0]['hypervisor_type']) def test_index_with_multiple_properties(self): path = '/images?foo=bar&hypervisor_type=kvm' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'foo': 'bar', 'hypervisor_type': 'kvm'}) properties = [image.extra_properties for image in output['images']] self.assertEqual('kvm', properties[0]['hypervisor_type']) self.assertEqual('bar', properties[0]['foo']) def test_index_with_core_and_extra_property(self): path = '/images?disk_format=raw&foo=bar' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'foo': 'bar', 'disk_format': 'raw'}) properties = [image.extra_properties for image in output['images']] self.assertEqual(1, len(output['images'])) self.assertEqual('raw', output['images'][0].disk_format) self.assertEqual('bar', properties[0]['foo']) def test_index_with_nonexistent_properties(self): path = '/images?abc=xyz&pudding=banana' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'abc': 'xyz', 'pudding': 'banana'}) self.assertEqual(0, len(output['images'])) def test_index_with_non_existent_tags(self): path = '/images?tag=fake' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['fake']}) actual = [image.tags for image in output['images']] self.assertEqual(0, len(actual)) def test_show(self): request = unit_test_utils.get_fake_request() output = self.controller.show(request, image_id=UUID2) self.assertEqual(UUID2, output.image_id) self.assertEqual('2', output.name) def test_show_deleted_properties(self): """Ensure that the api filters out deleted image properties.""" # get the image properties into the odd state image = { 'id': str(uuid.uuid4()), 'status': 'active', 'properties': {'poo': 'bear'}, } self.db.image_create(None, image) self.db.image_update(None, image['id'], {'properties': {'yin': 'yang'}}, purge_props=True) request = unit_test_utils.get_fake_request() output = self.controller.show(request, image['id']) self.assertEqual('yang', output.extra_properties['yin']) def test_show_non_existent(self): request = unit_test_utils.get_fake_request() image_id = str(uuid.uuid4()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, image_id) def test_show_deleted_image_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, 
request, UUID1) def test_show_not_allowed(self): request = unit_test_utils.get_fake_request() self.assertEqual(TENANT1, request.context.project_id) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID4) def test_show_not_allowed_by_policy(self): # Use admin so that we get past the check buried in the DB and # only hit the policy check we are mocking. request = unit_test_utils.get_fake_request(is_admin=True) with mock.patch.object(self.controller.policy, 'enforce') as mock_enf: mock_enf.side_effect = webob.exc.HTTPForbidden() # Make sure we get NotFound instead of Forbidden exc = self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID4) # Make sure we did not leak details of the original Forbidden # error into the NotFound returned to the client. self.assertEqual('The resource could not be found.', str(exc)) def test_get_task_info(self): request = unit_test_utils.get_fake_request() output = self.controller.get_task_info(request, image_id=UUID1) # NOTE Here we have only tasks associated with the image and not # other task which has not stored image_id, user_id and # request_id in tasks database table. self.assertEqual(2, len(output['tasks'])) for task in output['tasks']: self.assertEqual(UUID1, task['image_id']) self.assertEqual('fake-user-id', task['user_id']) self.assertEqual('fake-request-id', task['request_id']) def test_get_task_info_no_tasks(self): request = unit_test_utils.get_fake_request() output = self.controller.get_task_info(request, image_id=UUID2) self.assertEqual([], output['tasks']) def test_get_task_info_raises_not_found(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.get_task_info, request, 'fake-image-id') def test_image_import_raises_conflict_if_container_format_is_none(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(container_format=None) self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) def test_image_import_raises_conflict_if_disk_format_is_none(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(disk_format=None) self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) def test_image_import_raises_conflict(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='queued') self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) def test_image_import_raises_conflict_for_web_download(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'web-download'}}) def test_image_import_raises_conflict_for_invalid_status_change(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) 
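    # The conflict tests above all exercise the same entry point with the
    # same request shape; a minimal sketch of the call (the image id and
    # method name are taken from the fixtures above, purely illustrative):
    #
    #     body = {'method': {'name': 'glance-direct'}}
    #     self.controller.import_image(request, UUID4, body)
    #
    # import_image raises HTTPConflict whenever the image's status,
    # disk_format, or container_format makes the requested import method
    # invalid.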
@mock.patch('glance.db.simple.api.image_set_property_atomic') @mock.patch('glance.api.common.get_thread_pool') def test_image_import_raises_bad_request(self, mock_gpt, mock_spa): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') # NOTE(abhishekk): Due to # https://bugs.launchpad.net/glance/+bug/1712463 taskflow is not # executing. Once it is fixed instead of mocking spawn method # we should mock execute method of _ImportToStore task. mock_gpt.return_value.spawn.side_effect = ValueError self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) self.assertTrue(mock_gpt.return_value.spawn.called) def test_image_import_invalid_uri_filtering(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='queued') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID4, {'method': {'name': 'web-download', 'uri': 'fake_uri'}}) def test_image_import_raises_bad_request_for_glance_download_missing_input( self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='queued') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-download'}}) def test_image_import_raise_bad_request_wrong_id_for_glance_download( self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='queued') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-download', 'glance_image_id': 'fake_id', 'glance_region': 'REGION4'}}) @mock.patch.object(glance.domain.TaskFactory, 'new_task') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_image_import_add_default_service_endpoint_for_glance_download( self, mock_get, mock_nt): request = unit_test_utils.get_fake_request() mock_get.return_value = FakeImage(status='queued') body = {'method': {'name': 'glance-download', 'glance_image_id': UUID4, 'glance_region': 'REGION2'}} self.controller.import_image(request, UUID4, body) expected_req = {'method': {'name': 'glance-download', 'glance_image_id': UUID4, 'glance_region': 'REGION2', 'glance_service_interface': 'public'}} self.assertEqual(expected_req, mock_nt.call_args.kwargs['task_input']['import_req']) @mock.patch('glance.context.get_ksa_client') def test_image_import_proxies(self, mock_client): # Make sure that we proxy to the remote side when we need to self.config( worker_self_reference_url='http://glance-worker2.openstack.org') request = unit_test_utils.get_fake_request( '/v2/images/%s/import' % UUID4) with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') mock_get.return_value.extra_properties['os_glance_stage_host'] = ( 'https://glance-worker1.openstack.org') remote_hdrs = {'x-openstack-request-id': 'remote-req'} mock_resp = mock.MagicMock(location='/target', status_code=202, reason='Thanks', headers=remote_hdrs) mock_client.return_value.post.return_value = mock_resp r = self.controller.import_image( request, UUID4, {'method': {'name': 'glance-direct'}}) # Make sure we 
returned the ID like expected normally self.assertEqual(UUID4, r) # Make sure we called the expected remote URL and passed # the body. mock_client.return_value.post.assert_called_once_with( ('https://glance-worker1.openstack.org' '/v2/images/%s/import') % UUID4, json={'method': {'name': 'glance-direct'}}, timeout=60) # Make sure the remote request-id is returned to us self.assertEqual('remote-req', request.context.request_id) @mock.patch('glance.context.get_ksa_client') def test_image_delete_proxies(self, mock_client): # Make sure that we proxy to the remote side when we need to self.config( worker_self_reference_url='http://glance-worker2.openstack.org') request = unit_test_utils.get_fake_request( '/v2/images/%s' % UUID4, method='DELETE') with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') mock_get.return_value.extra_properties['os_glance_stage_host'] = ( 'https://glance-worker1.openstack.org') remote_hdrs = {'x-openstack-request-id': 'remote-req'} mock_resp = mock.MagicMock(location='/target', status_code=202, reason='Thanks', headers=remote_hdrs) mock_client.return_value.delete.return_value = mock_resp self.controller.delete(request, UUID4) # Make sure we called the expected remote URL and passed # the body. mock_client.return_value.delete.assert_called_once_with( ('https://glance-worker1.openstack.org' '/v2/images/%s') % UUID4, json=None, timeout=60) @mock.patch('glance.context.get_ksa_client') def test_image_import_proxies_error(self, mock_client): # Make sure that errors from the remote worker are proxied to our # client with the proper code and message self.config( worker_self_reference_url='http://glance-worker2.openstack.org') request = unit_test_utils.get_fake_request( '/v2/images/%s/import' % UUID4) with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') mock_get.return_value.extra_properties['os_glance_stage_host'] = ( 'https://glance-worker1.openstack.org') mock_resp = mock.MagicMock(location='/target', status_code=456, reason='No thanks') mock_client.return_value.post.return_value = mock_resp exc = self.assertRaises(webob.exc.HTTPError, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) self.assertEqual('456 No thanks', exc.status) mock_client.return_value.post.assert_called_once_with( ('https://glance-worker1.openstack.org' '/v2/images/%s/import') % UUID4, json={'method': {'name': 'glance-direct'}}, timeout=60) @mock.patch('glance.context.get_ksa_client') def test_image_delete_proxies_error(self, mock_client): # Make sure that errors from the remote worker are proxied to our # client with the proper code and message self.config( worker_self_reference_url='http://glance-worker2.openstack.org') request = unit_test_utils.get_fake_request( '/v2/images/%s' % UUID4, method='DELETE') with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') mock_get.return_value.extra_properties['os_glance_stage_host'] = ( 'https://glance-worker1.openstack.org') remote_hdrs = {'x-openstack-request-id': 'remote-req'} mock_resp = mock.MagicMock(location='/target', status_code=456, reason='No thanks', headers=remote_hdrs) mock_client.return_value.delete.return_value = mock_resp exc = self.assertRaises(webob.exc.HTTPError, self.controller.delete, request, UUID4) self.assertEqual('456 No thanks', exc.status) # Make sure we called the expected remote 
URL and passed # the body. mock_client.return_value.delete.assert_called_once_with( ('https://glance-worker1.openstack.org' '/v2/images/%s') % UUID4, json=None, timeout=60) @mock.patch('glance.context.get_ksa_client') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') @mock.patch.object(glance.notifier.ImageRepoProxy, 'remove') def test_image_delete_deletes_locally_on_error(self, mock_remove, mock_get, mock_client): # Make sure that if the proxy delete fails due to a connection error # that we continue with the delete ourselves. self.config( worker_self_reference_url='http://glance-worker2.openstack.org') request = unit_test_utils.get_fake_request( '/v2/images/%s' % UUID4, method='DELETE') image = FakeImage(status='uploading') mock_get.return_value = image image.extra_properties['os_glance_stage_host'] = ( 'https://glance-worker1.openstack.org') image.delete = mock.MagicMock() mock_client.return_value.delete.side_effect = ( requests.exceptions.ConnectTimeout) self.controller.delete(request, UUID4) # Make sure we called delete on our image mock_get.return_value.delete.assert_called_once_with() mock_remove.assert_called_once_with(image) # Make sure we called the expected remote URL and passed # the body. mock_client.return_value.delete.assert_called_once_with( ('https://glance-worker1.openstack.org' '/v2/images/%s') % UUID4, json=None, timeout=60) @mock.patch('glance.context.get_ksa_client') def test_image_import_no_proxy_non_direct(self, mock_client): # Make sure that we won't take the proxy path for import methods # other than glance-direct self.config( worker_self_reference_url='http://glance-worker2.openstack.org') request = unit_test_utils.get_fake_request( '/v2/images/%s/import' % UUID4) with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='queued') mock_get.return_value.extra_properties['os_glance_stage_host'] = ( 'https://glance-worker1.openstack.org') # This will fail validation after the point at which we would # have proxied to the remote side, just to avoid task setup. 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID4, {'method': {'name': 'web-download', 'url': 'not-a-url'}}) # Make sure we did not try to proxy this web-download request mock_client.return_value.post.assert_not_called() def test_create(self): request = unit_test_utils.get_fake_request() image = {'name': 'image-1'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('image-1', output.name) self.assertEqual({}, output.extra_properties) self.assertEqual(set([]), output.tags) self.assertEqual('shared', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual('image-1', output_log['payload']['name']) def test_create_disabled_notification(self): self.config(disabled_notifications=["image.create"]) request = unit_test_utils.get_fake_request() image = {'name': 'image-1'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('image-1', output.name) self.assertEqual({}, output.extra_properties) self.assertEqual(set([]), output.tags) self.assertEqual('shared', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(0, len(output_logs)) def test_create_with_properties(self): request = unit_test_utils.get_fake_request() image_properties = {'foo': 'bar'} image = {'name': 'image-1'} output = self.controller.create(request, image=image, extra_properties=image_properties, tags=[]) self.assertEqual('image-1', output.name) self.assertEqual(image_properties, output.extra_properties) self.assertEqual(set([]), output.tags) self.assertEqual('shared', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual('image-1', output_log['payload']['name']) def test_create_with_too_many_properties(self): self.config(image_property_quota=1) request = unit_test_utils.get_fake_request() image_properties = {'foo': 'bar', 'foo2': 'bar'} image = {'name': 'image-1'} self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_create_with_bad_min_disk_size(self): request = unit_test_utils.get_fake_request() image = {'min_disk': -42, 'name': 'image-1'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_with_bad_min_ram_size(self): request = unit_test_utils.get_fake_request() image = {'min_ram': -42, 'name': 'image-1'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_public_image_as_admin(self): request = unit_test_utils.get_fake_request() image = {'name': 'image-1', 'visibility': 'public'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('public', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual(output.image_id, output_log['payload']['id']) def 
test_create_dup_id(self): request = unit_test_utils.get_fake_request() image = {'image_id': UUID4} self.assertRaises(webob.exc.HTTPConflict, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_duplicate_tags(self): request = unit_test_utils.get_fake_request() tags = ['ping', 'ping'] output = self.controller.create(request, image={}, extra_properties={}, tags=tags) self.assertEqual(set(['ping']), output.tags) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual(output.image_id, output_log['payload']['id']) def test_create_with_too_many_tags(self): self.config(image_tag_quota=1) request = unit_test_utils.get_fake_request() tags = ['ping', 'pong'] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, request, image={}, extra_properties={}, tags=tags) def test_create_with_owner_non_admin(self): enforcer = unit_test_utils.enforcer_from_rules({ "add_image": "role:member,reader", }) request = unit_test_utils.get_fake_request() request.context.is_admin = False image = {'owner': '12345'} self.controller.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image=image, extra_properties={}, tags=[]) enforcer = unit_test_utils.enforcer_from_rules({ "add_image": "'{0}':%(owner)s".format(TENANT1), }) request = unit_test_utils.get_fake_request() request.context.is_admin = False image = {'owner': TENANT1} self.controller.policy = enforcer output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual(TENANT1, output.owner) def test_create_with_owner_admin(self): request = unit_test_utils.get_fake_request() request.context.is_admin = True image = {'owner': '12345'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('12345', output.owner) def test_create_with_duplicate_location(self): request = unit_test_utils.get_fake_request() location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} image = {'name': 'image-1', 'locations': [location, location]} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_unexpected_property(self): request = unit_test_utils.get_fake_request() image_properties = {'unexpected': 'unexpected'} image = {'name': 'image-1'} with mock.patch.object(domain.ImageFactory, 'new_image', side_effect=TypeError): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_create_reserved_property(self): request = unit_test_utils.get_fake_request() image_properties = {'reserved': 'reserved'} image = {'name': 'image-1'} with mock.patch.object(domain.ImageFactory, 'new_image', side_effect=exception.ReservedProperty( property='reserved')): self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_create_readonly_property(self): request = unit_test_utils.get_fake_request() image_properties = {'readonly': 'readonly'} image = {'name': 'image-1'} with mock.patch.object(domain.ImageFactory, 'new_image', side_effect=exception.ReadonlyProperty( property='readonly')): self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image=image, 
                              extra_properties=image_properties, tags=[])

    def test_update_no_changes(self):
        request = unit_test_utils.get_fake_request()
        output = self.controller.update(request, UUID1, changes=[])
        self.assertEqual(UUID1, output.image_id)
        self.assertEqual(output.created_at, output.updated_at)
        self.assertEqual(2, len(output.tags))
        self.assertIn('ping', output.tags)
        self.assertIn('pong', output.tags)
        output_logs = self.notifier.get_logs()
        # NOTE(markwash): don't send a notification if nothing is updated
        self.assertEqual(0, len(output_logs))

    def test_update_queued_image_with_hidden(self):
        request = unit_test_utils.get_fake_request()
        changes = [{'op': 'replace', 'path': ['os_hidden'], 'value': 'true'}]
        image = self.controller.update(request, UUID1, changes=changes)
        self.assertTrue(image.os_hidden)

    def test_update_with_bad_min_disk(self):
        request = unit_test_utils.get_fake_request()
        changes = [{'op': 'replace', 'path': ['min_disk'], 'value': -42}]
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, request, UUID1,
                          changes=changes)

    def test_update_with_bad_min_ram(self):
        request = unit_test_utils.get_fake_request()
        changes = [{'op': 'replace', 'path': ['min_ram'], 'value': -42}]
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, request, UUID1,
                          changes=changes)

    def test_update_image_doesnt_exist(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          request, str(uuid.uuid4()), changes=[])

    def test_update_deleted_image_admin(self):
        request = unit_test_utils.get_fake_request(is_admin=True)
        self.controller.delete(request, UUID1)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          request, UUID1, changes=[])

    def test_update_with_too_many_properties(self):
        self.config(show_multiple_locations=True)
        self.config(user_storage_quota='1')
        new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}}
        request = unit_test_utils.get_fake_request()
        changes = [{'op': 'add', 'path': ['locations', '-'],
                    'value': new_location}]
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.update, request, UUID1,
                          changes=changes)

    def test_update_replace_base_attribute(self):
        self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}})
        request = unit_test_utils.get_fake_request()
        request.context.is_admin = True
        changes = [{'op': 'replace', 'path': ['name'], 'value': 'fedora'},
                   {'op': 'replace', 'path': ['owner'], 'value': TENANT3}]
        output = self.controller.update(request, UUID1, changes)
        self.assertEqual(UUID1, output.image_id)
        self.assertEqual('fedora', output.name)
        self.assertEqual(TENANT3, output.owner)
        self.assertEqual({'foo': 'bar'}, output.extra_properties)
        self.assertNotEqual(output.created_at, output.updated_at)

    def test_update_replace_owner_non_admin(self):
        request = unit_test_utils.get_fake_request()
        request.context.is_admin = False
        changes = [{'op': 'replace', 'path': ['owner'], 'value': TENANT3}]
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          request, UUID1, changes)

    def test_update_replace_tags(self):
        request = unit_test_utils.get_fake_request()
        changes = [
            {'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']},
        ]
        output = self.controller.update(request, UUID1, changes)
        self.assertEqual(UUID1, output.image_id)
        self.assertEqual(2, len(output.tags))
        self.assertIn('king', output.tags)
        self.assertIn('kong', output.tags)
        self.assertNotEqual(output.created_at, output.updated_at)

    def test_update_replace_property(self):
        request = unit_test_utils.get_fake_request()
properties = {'foo': 'bar', 'snitch': 'golden'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) changes = [ {'op': 'replace', 'path': ['foo'], 'value': 'baz'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('baz', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_too_many_properties(self): self.config(image_property_quota=1) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_reserved_not_counted_in_quota(self): self.config(image_property_quota=1) request = unit_test_utils.get_fake_request() self.db.image_update(None, UUID1, {'properties': { 'os_glance_foo': '123', 'os_glance_bar': 456}}) changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, ] # Should succeed self.controller.update(request, UUID1, changes) changes = [ {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] # Should fail, over quota self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_and_remove_too_many_properties(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] self.controller.update(request, UUID1, changes) self.config(image_property_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['foo']}, {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_unlimited_properties(self): self.config(image_property_quota=-1) request = unit_test_utils.get_fake_request() output = self.controller.show(request, UUID1) changes = [{'op': 'add', 'path': ['foo'], 'value': 'bar'}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertNotEqual(output.created_at, output.updated_at) def test_update_format_properties(self): statuses_for_immutability = ['active', 'saving', 'killed'] request = unit_test_utils.get_fake_request(roles=['admin'], is_admin=True) for status in statuses_for_immutability: image = { 'id': str(uuid.uuid4()), 'status': status, 'disk_format': 'ari', 'container_format': 'ari', } self.db.image_create(None, image) changes = [ {'op': 'replace', 'path': ['disk_format'], 'value': 'ami'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, image['id'], changes) changes = [ {'op': 'replace', 'path': ['container_format'], 'value': 'ami'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, image['id'], changes) self.db.image_update(None, image['id'], {'status': 'queued'}) changes = [ {'op': 'replace', 'path': ['disk_format'], 'value': 'raw'}, {'op': 'replace', 'path': ['container_format'], 'value': 'bare'}, ] resp = self.controller.update(request, image['id'], changes) self.assertEqual('raw', resp.disk_format) self.assertEqual('bare', resp.container_format) def 
test_update_remove_property_while_over_limit(self): """Ensure that image properties can be removed. Image properties should be able to be removed as long as the image has fewer than the limited number of image properties after the transaction. """ request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, ] self.controller.update(request, UUID1, changes) self.config(image_property_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['foo']}, {'op': 'remove', 'path': ['snitch']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.extra_properties)) self.assertEqual('buzz', output.extra_properties['fizz']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_and_remove_property_under_limit(self): """Ensure that image properties can be removed. Image properties should be able to be added and removed simultaneously as long as the image has fewer than the limited number of image properties after the transaction. """ request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] self.controller.update(request, UUID1, changes) self.config(image_property_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['foo']}, {'op': 'remove', 'path': ['snitch']}, {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.extra_properties)) self.assertEqual('buzz', output.extra_properties['fizz']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_replace_missing_property(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': 'foo', 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_prop_protection_with_create_and_permitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'add', 'path': ['x_owner_foo'], 'value': 'bar'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('bar', output.extra_properties['x_owner_foo']) def test_prop_protection_with_update_and_permitted_policy(self): self.set_property_protections(use_policies=True) enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) request = unit_test_utils.get_fake_request(roles=['spl_role', 'admin']) image = {'name': 'image-1'} extra_props = {'spl_creator_policy': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) self.assertEqual('bar', 
created_image.extra_properties['spl_creator_policy']) another_request = unit_test_utils.get_fake_request(roles=['spl_role']) changes = [ {'op': 'replace', 'path': ['spl_creator_policy'], 'value': 'par'}, ] enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:spl_role" }) self.controller.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:admin" }) self.controller.policy = enforcer another_request = unit_test_utils.get_fake_request(roles=['admin']) output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('par', output.extra_properties['spl_creator_policy']) def test_prop_protection_with_create_with_patch_and_policy(self): self.set_property_protections(use_policies=True) enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) request = unit_test_utils.get_fake_request(roles=['spl_role', 'admin']) image = {'name': 'image-1'} extra_props = {'spl_default_policy': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'add', 'path': ['spl_creator_policy'], 'value': 'bar'}, ] enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:fake_role" }) self.controller.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:member" }) self.controller.policy = enforcer another_request = unit_test_utils.get_fake_request(roles=['member', 'spl_role']) output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('bar', output.extra_properties['spl_creator_policy']) def test_prop_protection_with_create_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) roles = ['fake_member'] enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:fake_member" }) self.controller.policy = enforcer another_request = unit_test_utils.get_fake_request(roles=roles) changes = [ {'op': 'add', 'path': ['x_owner_foo'], 'value': 'bar'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) def test_prop_protection_with_show_and_permitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['reader', 
'member']) output = self.controller.show(another_request, created_image.image_id) self.assertEqual('bar', output.extra_properties['x_owner_foo']) def test_prop_protection_with_show_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['member']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['reader', 'fake_role']) output = self.controller.show(another_request, created_image.image_id) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_owner_foo') def test_prop_protection_with_update_and_permitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'replace', 'path': ['x_owner_foo'], 'value': 'baz'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('baz', output.extra_properties['x_owner_foo']) def test_prop_protection_with_update_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:fake_role" }) self.controller.policy = enforcer another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'replace', 'path': ['x_owner_foo'], 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, another_request, created_image.image_id, changes) def test_prop_protection_with_delete_and_permitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'remove', 'path': ['x_owner_foo']} ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_owner_foo') def test_prop_protection_with_delete_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, 
enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "role:fake_role" }) self.controller.policy = enforcer another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'remove', 'path': ['x_owner_foo']} ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, another_request, created_image.image_id, changes) def test_create_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'add', 'path': ['x_case_insensitive'], 'value': '1'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('1', output.extra_properties['x_case_insensitive']) def test_read_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_case_insensitive': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['reader', 'member']) output = self.controller.show(another_request, created_image.image_id) self.assertEqual('1', output.extra_properties['x_case_insensitive']) def test_update_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_case_insensitive': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'replace', 'path': ['x_case_insensitive'], 'value': '2'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('2', output.extra_properties['x_case_insensitive']) def test_delete_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer( suppress_deprecation_warnings=True) self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_case_insensitive': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'remove', 'path': ['x_case_insensitive']} ] output = 
self.controller.update(another_request, created_image.image_id, changes) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_case_insensitive') def test_create_non_protected_prop(self): """Property marked with special char @ creatable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted_1': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) self.assertEqual('1', created_image.extra_properties['x_all_permitted_1']) another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) extra_props = {'x_all_permitted_2': '2'} created_image = self.controller.create(another_request, image=image, extra_properties=extra_props, tags=[]) self.assertEqual('2', created_image.extra_properties['x_all_permitted_2']) def test_read_non_protected_prop(self): """Property marked with special char @ readable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) output = self.controller.show(another_request, created_image.image_id) self.assertEqual('1', output.extra_properties['x_all_permitted']) def test_update_non_protected_prop(self): """Property marked with special char @ updatable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member', 'joe_soap']) changes = [ {'op': 'replace', 'path': ['x_all_permitted'], 'value': 'baz'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('baz', output.extra_properties['x_all_permitted']) def test_delete_non_protected_prop(self): """Property marked with special char @ deletable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'remove', 'path': ['x_all_permitted']} ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_all_permitted') def test_create_locked_down_protected_prop(self): """Property marked with special char ! creatable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) roles = ['fake_member'] another_request = unit_test_utils.get_fake_request(roles=roles) changes = [ {'op': 'add', 'path': ['x_none_permitted'], 'value': 'bar'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) def test_read_locked_down_protected_prop(self): """Property marked with special char ! 
readable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['member']) image = {'name': 'image-1'} extra_props = {'x_none_read': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) output = self.controller.show(another_request, created_image.image_id) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_none_read') def test_update_locked_down_protected_prop(self): """Property marked with special char ! updatable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_none_update': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member', 'fake_role']) changes = [ {'op': 'replace', 'path': ['x_none_update'], 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) def test_delete_locked_down_protected_prop(self): """Property marked with special char ! deletable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_none_delete': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member', 'fake_role']) changes = [ {'op': 'remove', 'path': ['x_none_delete']} ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) def test_update_replace_locations_non_empty(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location]}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_replace_locations_metadata_update(self): self.config(show_multiple_locations=True) location = {'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {'a': 1}} request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': [location]}] output = self.controller.update(request, UUID1, changes) self.assertEqual({'a': 1}, output.locations[0]['metadata']) def test_locations_actions_with_locations_invisible(self): self.config(show_multiple_locations=False) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location]}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_replace_locations_invalid(self): request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_add_property(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('baz', output.extra_properties['foo']) 
self.assertEqual('golden', output.extra_properties['snitch']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_base_property_json_schema_version_4(self): request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 4, 'op': 'add', 'path': ['name'], 'value': 'fedora' }] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_add_extra_property_json_schema_version_4(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 4, 'op': 'add', 'path': ['foo'], 'value': 'baz' }] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_add_base_property_json_schema_version_10(self): request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 10, 'op': 'add', 'path': ['name'], 'value': 'fedora' }] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('fedora', output.name) def test_update_add_extra_property_json_schema_version_10(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 10, 'op': 'add', 'path': ['foo'], 'value': 'baz' }] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual({'foo': 'baz'}, output.extra_properties) def test_update_add_property_already_present_json_schema_version_4(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) changes = [ {'json_schema_version': 4, 'op': 'add', 'path': ['foo'], 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_add_property_already_present_json_schema_version_10(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) changes = [ {'json_schema_version': 10, 'op': 'add', 'path': ['foo'], 'value': 'baz'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual({'foo': 'baz'}, output.extra_properties) def test_update_add_locations(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location, output.locations[1]) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_replace_locations_on_queued(self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 
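# The mocks above stub out store size lookups, location checking and
# ACL updates so the location replace below can run without a real
# backend; with locations patched onto a 'queued' image, the image is
# expected to go straight to 'active' and adopt the checksum/multihash
# supplied in validation_data.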
self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued', checksum=None, os_hash_algo=None, os_hash_value=None), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location1 = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} new_location2 = {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location1, new_location2]}] output = self.controller.update(request, image_id, changes) self.assertEqual(image_id, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location1['url'], output.locations[0]['url']) self.assertEqual(new_location2['url'], output.locations[1]['url']) self.assertEqual('active', output.status) self.assertEqual(CHKSUM, output.checksum) self.assertEqual('sha512', output.os_hash_algo) self.assertEqual(MULTIHASH1, output.os_hash_value) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_replace_locations_identify_associated_store( self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) self.config(enabled_backends={'fake-store': 'http'}) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued', checksum=None, os_hash_algo=None, os_hash_value=None), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location1 = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} new_location2 = {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location1, new_location2]}] with mock.patch.object(store_utils, '_get_store_id_from_uri') as mock_store: mock_store.return_value = 'fake-store' # ensure location metadata is updated new_location1['metadata']['store'] = 'fake-store' new_location2['metadata']['store'] = 'fake-store' output = self.controller.update(request, image_id, changes) self.assertEqual(2, len(output.locations)) self.assertEqual(image_id, output.image_id) self.assertEqual(new_location1, output.locations[0]) self.assertEqual(new_location2, output.locations[1]) self.assertEqual('active', output.status) self.assertEqual(CHKSUM, output.checksum) self.assertEqual('sha512', output.os_hash_algo) self.assertEqual(MULTIHASH1, output.os_hash_value)
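# A minimal sketch of the store-identification behavior exercised
# above (illustrative only; the helper name mirrors the mocked
# store_utils._get_store_id_from_uri, everything else is assumed):
#
#     store_id = store_utils._get_store_id_from_uri(location['url'])
#     if store_id:
#         # record which enabled backend serves this location
#         location['metadata']['store'] = store_id
#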
@mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_replace_locations_unknown_locations( self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) self.config(enabled_backends={'fake-store': 'http'}) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued', checksum=None, os_hash_algo=None, os_hash_value=None), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location1 = {'url': 'unknown://whocares', 'metadata': {}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} new_location2 = {'url': 'unknown://whatever', 'metadata': {'store': 'unkstore'}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location1, new_location2]}] output = self.controller.update(request, image_id, changes) self.assertEqual(2, len(output.locations)) self.assertEqual(image_id, output.image_id) self.assertEqual('active', output.status) self.assertEqual(CHKSUM, output.checksum) self.assertEqual('sha512', output.os_hash_algo) self.assertEqual(MULTIHASH1, output.os_hash_value) # ensure location metadata is same self.assertEqual(new_location1, output.locations[0]) self.assertEqual(new_location2, output.locations[1]) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_add_location_new_validation_data_on_active(self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='active', checksum=None, os_hash_algo=None, os_hash_value=None), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.assertRaisesRegex( webob.exc.HTTPConflict, "may only be provided when image status is 'queued'", self.controller.update, request, image_id, changes) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_replace_locations_different_validation_data(self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='active', checksum=CHKSUM, os_hash_algo='sha512', 
os_hash_value=MULTIHASH1), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM1, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH2}} changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location]}] self.assertRaisesRegex( webob.exc.HTTPConflict, "already set with a different value", self.controller.update, request, image_id, changes) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def _test_add_location_on_queued(self, visibility, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', visibility=visibility, status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, image_id, changes) self.assertEqual(image_id, output.image_id) self.assertEqual(1, len(output.locations)) self.assertEqual(new_location, output.locations[0]) self.assertEqual('active', output.status) self.assertEqual(visibility, output.visibility) mock_set_acls.assert_called_once() def test_add_location_on_queued_shared(self): self._test_add_location_on_queued('shared') def test_add_location_on_queued_community(self): self._test_add_location_on_queued('community') def test_add_location_on_queued_public(self): self._test_add_location_on_queued('public') @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_add_location_identify_associated_store( self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) self.config(enabled_backends={'fake-store': 'http'}) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] with mock.patch.object(store_utils, '_get_store_id_from_uri') as mock_store: mock_store.return_value = 'fake-store' output = self.controller.update(request, image_id, changes) self.assertEqual(image_id, output.image_id) self.assertEqual(1, len(output.locations)) self.assertEqual('active', output.status) # ensure location metadata is updated new_location['metadata']['store'] = 'fake-store' self.assertEqual(new_location, output.locations[0]) 
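# A rough model of the JSON-patch list semantics these location tests
# rely on (hypothetical helper, not glance's actual implementation):
#
#     def apply_location_op(locations, op, pos, value=None):
#         if op == 'add':
#             # '-' appends; a numeric index inserts at that position
#             idx = len(locations) if pos == '-' else int(pos)
#             locations.insert(idx, value)
#         elif op == 'remove':
#             del locations[int(pos)]
#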
@mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_add_location_unknown_locations( self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) self.config(enabled_backends={'fake-store': 'http'}) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) new_location = {'url': 'unknown://whocares', 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, image_id, changes) self.assertEqual(image_id, output.image_id) self.assertEqual('active', output.status) self.assertEqual(1, len(output.locations)) # ensure location metadata is same self.assertEqual(new_location, output.locations[0]) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_add_location_invalid_validation_data(self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=None, os_hash_algo=None, os_hash_value=None, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() location = { 'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {} } changes = [{'op': 'add', 'path': ['locations', '-'], 'value': location}] changes[0]['value']['validation_data'] = { 'checksum': 'something the same length as md5', 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1, } self.assertRaisesRegex( webob.exc.HTTPConflict, 'checksum .* is not a valid hexadecimal value', self.controller.update, request, image_id, changes) changes[0]['value']['validation_data'] = { 'checksum': '0123456789abcdef', 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1, } self.assertRaisesRegex( webob.exc.HTTPConflict, 'checksum .* is not the correct size', self.controller.update, request, image_id, changes) changes[0]['value']['validation_data'] = { 'checksum': CHKSUM, 'os_hash_algo': 'sha256', 'os_hash_value': MULTIHASH1, } self.assertRaisesRegex( webob.exc.HTTPConflict, 'os_hash_algo must be sha512', self.controller.update, request, image_id, changes) changes[0]['value']['validation_data'] = { 'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': 'not a hex value', } self.assertRaisesRegex( webob.exc.HTTPConflict, 'os_hash_value .* is not a valid hexadecimal value', self.controller.update, request, image_id, changes) changes[0]['value']['validation_data'] = { 'checksum': CHKSUM, 'os_hash_algo': 'sha512', 'os_hash_value': '0123456789abcdef', } self.assertRaisesRegex( 
webob.exc.HTTPConflict, 'os_hash_value .* is not the correct size for sha512', self.controller.update, request, image_id, changes) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_add_location_same_validation_data(self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) os_hash_value = '6513f21e44aa3da349f248188a44bc304a3653a04122d8fb45' \ '35423c8e1d14cd6a153f735bb0982e2161b5b5186106570c17' \ 'a9e58b64dd39390617cd5a350f78' self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='active', checksum='checksum1', os_hash_algo='sha512', os_hash_value=os_hash_value), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': 'checksum1', 'os_hash_algo': 'sha512', 'os_hash_value': os_hash_value}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, image_id, changes) self.assertEqual(image_id, output.image_id) self.assertEqual(1, len(output.locations)) self.assertEqual(new_location, output.locations[0]) self.assertEqual('active', output.status) @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 'get_size_from_uri_and_backend') @mock.patch.object(store, 'get_size_from_backend') def test_add_location_different_validation_data(self, mock_get_size, mock_get_size_uri, mock_set_acls, mock_check_loc, mock_calc): mock_calc.return_value = 1 mock_get_size.return_value = 1 mock_get_size_uri.return_value = 1 self.config(show_multiple_locations=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='active', checksum=CHKSUM, os_hash_algo='sha512', os_hash_value=MULTIHASH1), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}, 'validation_data': {'checksum': CHKSUM1, 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH2}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.assertRaisesRegex( webob.exc.HTTPConflict, "already set with a different value", self.controller.update, request, image_id, changes) def _test_update_locations_status(self, image_status, update): self.config(show_multiple_locations=True) self.images = [ _db_fixture('1', owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', status=image_status), ] request = unit_test_utils.get_fake_request() if image_status == 'deactivated': self.db.image_create(request.context, self.images[0]) else: self.db.image_create(None, self.images[0]) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} changes = [{'op': update, 'path': ['locations', '-'], 'value': new_location}] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, 
request, '1', changes) def test_location_add_not_permitted_status_saving(self): self._test_update_locations_status('saving', 'add') def test_location_add_not_permitted_status_deactivated(self): self._test_update_locations_status('deactivated', 'add') def test_location_add_not_permitted_status_deleted(self): self._test_update_locations_status('deleted', 'add') def test_location_add_not_permitted_status_pending_delete(self): self._test_update_locations_status('pending_delete', 'add') def test_location_add_not_permitted_status_killed(self): self._test_update_locations_status('killed', 'add') def test_location_add_not_permitted_status_importing(self): self._test_update_locations_status('importing', 'add') def test_location_add_not_permitted_status_uploading(self): self._test_update_locations_status('uploading', 'add') def test_location_remove_not_permitted_status_saving(self): self._test_update_locations_status('saving', 'remove') def test_location_remove_not_permitted_status_deactivated(self): self._test_update_locations_status('deactivated', 'remove') def test_location_remove_not_permitted_status_deleted(self): self._test_update_locations_status('deleted', 'remove') def test_location_remove_not_permitted_status_pending_delete(self): self._test_update_locations_status('pending_delete', 'remove') def test_location_remove_not_permitted_status_killed(self): self._test_update_locations_status('killed', 'remove') def test_location_remove_not_permitted_status_queued(self): self._test_update_locations_status('queued', 'remove') def test_location_remove_not_permitted_status_importing(self): self._test_update_locations_status('importing', 'remove') def test_location_remove_not_permitted_status_uploading(self): self._test_update_locations_status('uploading', 'remove') def test_location_replace_not_permitted_status_saving(self): self._test_update_locations_status('saving', 'replace') def test_location_replace_not_permitted_status_deactivated(self): self._test_update_locations_status('deactivated', 'replace') def test_location_replace_not_permitted_status_deleted(self): self._test_update_locations_status('deleted', 'replace') def test_location_replace_not_permitted_status_pending_delete(self): self._test_update_locations_status('pending_delete', 'replace') def test_location_replace_not_permitted_status_killed(self): self._test_update_locations_status('killed', 'replace') def test_location_replace_not_permitted_status_importing(self): self._test_update_locations_status('importing', 'replace') def test_location_replace_not_permitted_status_uploading(self): self._test_update_locations_status('uploading', 'replace') def test_update_add_locations_insertion(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '0'], 'value': new_location}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location, output.locations[0]) def test_update_add_locations_list(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'foo', 'metadata': {}}}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_add_locations_invalid(self): self.config(show_multiple_locations=True) request = 
unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'unknow://foo', 'metadata': {}}}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'add', 'path': ['locations', None], 'value': {'url': 'unknow://foo', 'metadata': {}}}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_add_duplicate_locations(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location, output.locations[1]) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_add_too_many_locations(self): self.config(show_multiple_locations=True) self.config(image_location_quota=1) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_and_remove_too_many_locations(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.controller.update(request, UUID1, changes) self.config(image_location_quota=1) # Removing one location while adding another still leaves # the image over the limit of 1 location changes = [ {'op': 'remove', 'path': ['locations', '0']}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_3' % BASE_URI, 'metadata': {}}}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_unlimited_locations(self): self.config(show_multiple_locations=True) self.config(image_location_quota=-1) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertNotEqual(output.created_at, output.updated_at)
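# The quota rule the tests above assume, as a hedged sketch
# (hypothetical helper): a negative image_location_quota means
# unlimited, and the check applies to the location count *after* the
# whole patch is applied, which is why combined add/remove changes can
# still pass or fail the limit.
#
#     def exceeds_quota(count_after_patch, quota):
#         return quota >= 0 and count_after_patch > quota
#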
""" self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.controller.update(request, UUID1, changes) self.config(image_location_quota=1) self.config(show_multiple_locations=True) # We must remove two locations to avoid being over # the limit of 1 location changes = [ {'op': 'remove', 'path': ['locations', '0']}, {'op': 'remove', 'path': ['locations', '0']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.locations)) self.assertIn('fake_location_2', output.locations[0]['url']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_and_remove_location_under_limit(self): """Ensure that image locations can be removed. Image locations should be able to be added and removed simultaneously as long as the image has fewer than the limited number of image locations after the transaction. """ self.mock_object(store, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.controller.update(request, UUID1, changes) self.config(image_location_quota=2) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['locations', '0']}, {'op': 'remove', 'path': ['locations', '0']}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_3' % BASE_URI, 'metadata': {}}}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertIn('fake_location_3', output.locations[1]['url']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_remove_base_property(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() changes = [{'op': 'remove', 'path': ['name']}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_remove_property(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar', 'snitch': 'golden'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) changes = [ {'op': 'remove', 'path': ['snitch']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual({'foo': 'bar'}, output.extra_properties) self.assertNotEqual(output.created_at, output.updated_at) def test_update_remove_missing_property(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'remove', 'path': ['foo']}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_remove_location(self): self.config(show_multiple_locations=True) self.mock_object(store, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) request 
def test_update_remove_location(self): self.config(show_multiple_locations=True) self.mock_object(store, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.controller.update(request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '0']}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.locations)) self.assertEqual('active', output.status) def test_update_remove_location_invalid_pos(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location' % BASE_URI, 'metadata': {}}}] self.controller.update(request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', None]}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '-1']}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '99']}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', 'x']}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_remove_location_store_exception(self): self.config(show_multiple_locations=True) def fake_delete_image_location_from_backend(self, *args, **kwargs): raise Exception('fake_backend_exception') self.mock_object(self.store_utils, 'delete_image_location_from_backend', fake_delete_image_location_from_backend) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location' % BASE_URI, 'metadata': {}}}] self.controller.update(request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '0']}] self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, request, UUID1, changes) def test_update_multiple_changes(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar', 'snitch': 'golden'} self.db.image_update(None, UUID1, {'properties': properties}) changes = [ {'op': 'replace', 'path': ['min_ram'], 'value': 128}, {'op': 'replace', 'path': ['foo'], 'value': 'baz'}, {'op': 'remove', 'path': ['snitch']}, {'op': 'add', 'path': ['kb'], 'value': 'dvorak'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(128, output.min_ram) self.addDetail('extra_properties', testtools.content.json_content( jsonutils.dumps(output.extra_properties))) self.assertEqual(2, len(output.extra_properties)) self.assertEqual('baz', output.extra_properties['foo']) self.assertEqual('dvorak', output.extra_properties['kb']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_invalid_operation(self): request = unit_test_utils.get_fake_request() change = {'op': 'test', 'path': 'options', 'value': 'puts'} try: self.controller.update(request, UUID1, [change]) except AttributeError: pass # AttributeError is the desired behavior else: self.fail('Failed to raise AttributeError on %s' % change) def test_update_duplicate_tags(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': ['tags'], 'value': ['ping', 'ping']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(1, len(output.tags))
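# Tags behave as a set, so the duplicate 'ping' entries in the request
# collapse to a single tag.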
self.assertIn('ping', output.tags) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.update', output_log['event_type']) self.assertEqual(UUID1, output_log['payload']['id']) def test_update_disabled_notification(self): self.config(disabled_notifications=["image.update"]) request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': ['name'], 'value': 'Ping Pong'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual('Ping Pong', output.name) output_logs = self.notifier.get_logs() self.assertEqual(0, len(output_logs)) def test_delete(self): request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) try: self.controller.delete(request, UUID1) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual("image.delete", output_log['event_type']) except Exception as e: self.fail("Delete raised exception: %s" % e) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delete_not_allowed_by_policy(self): request = unit_test_utils.get_fake_request() with mock.patch.object(self.controller.policy, 'enforce') as mock_enf: mock_enf.side_effect = webob.exc.HTTPForbidden() exc = self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, UUID1) self.assertTrue(mock_enf.called) # Make sure we did not leak details of the original Forbidden # error into the NotFound returned to the client. self.assertEqual('The resource could not be found.', str(exc)) # Now reject the delete_image call, but allow get_image to ensure that # we properly see a Forbidden result. 
with mock.patch.object(self.controller.policy, 'enforce') as mock_enf: mock_enf.side_effect = [webob.exc.HTTPForbidden(), lambda *a: None] exc = self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID1) self.assertTrue(mock_enf.called) @mock.patch.object(store, 'get_store_from_store_identifier') @mock.patch.object(store.location, 'get_location_from_uri_and_backend') @mock.patch.object(store_utils, 'get_dir_separator') def test_verify_staging_data_deleted_on_image_delete( self, mock_get_dir_separator, mock_location, mock_store): self.config(enabled_backends={'fake-store': 'file'}) fake_staging_store = mock.Mock() mock_store.return_value = fake_staging_store mock_get_dir_separator.return_value = ( "/", "/tmp/os_glance_staging_store") image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='importing', checksum=None, os_hash_algo=None, os_hash_value=None), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() try: self.controller.delete(request, image_id) self.assertEqual(1, mock_store.call_count) mock_store.assert_called_once_with("os_glance_staging_store") self.assertEqual(1, mock_location.call_count) fake_staging_store.delete.assert_called_once() except Exception as e: self.fail("Delete raised exception: %s" % e) deleted_img = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) def test_delete_with_tags(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': ['tags'], 'value': ['many', 'cool', 'new', 'tags']}, ] self.controller.update(request, UUID1, changes) self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) self.controller.delete(request, UUID1) output_logs = self.notifier.get_logs() # Get `delete` event from logs output_delete_logs = [output_log for output_log in output_logs if output_log['event_type'] == 'image.delete'] self.assertEqual(1, len(output_delete_logs)) output_log = output_delete_logs[0] self.assertEqual('INFO', output_log['notification_type']) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delete_disabled_notification(self): self.config(disabled_notifications=["image.delete"]) request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) try: self.controller.delete(request, UUID1) output_logs = self.notifier.get_logs() self.assertEqual(0, len(output_logs)) except Exception as e: self.fail("Delete raised exception: %s" % e) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delete_queued_updates_status(self): """Ensure status of queued image is updated (LP bug #1048851)""" request = unit_test_utils.get_fake_request(is_admin=True) image = self.db.image_create(request.context, {'status': 'queued'}) image_id = image['id'] self.controller.delete(request, image_id) image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def 
test_delete_queued_updates_status_delayed_delete(self): """Ensure status of queued image is updated (LP bug #1048851). Must be set to 'deleted' when delayed_delete is enabled. """ self.config(delayed_delete=True) request = unit_test_utils.get_fake_request(is_admin=True) image = self.db.image_create(request.context, {'status': 'queued'}) image_id = image['id'] self.controller.delete(request, image_id) image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_delete_not_in_store(self): request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) for k in self.store.data: if UUID1 in k: del self.store.data[k] break self.controller.delete(request, UUID1) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delayed_delete(self): self.config(delayed_delete=True) request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) self.controller.delete(request, UUID1) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('pending_delete', deleted_img['status']) self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delete_non_existent(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, str(uuid.uuid4())) def test_delete_already_deleted_image_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, UUID1) def test_delete_not_allowed(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, UUID4) def test_delete_in_use(self): def fake_safe_delete_from_backend(self, *args, **kwargs): raise store.exceptions.InUseByStore() self.mock_object(self.store_utils, 'safe_delete_from_backend', fake_safe_delete_from_backend) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, request, UUID1) def test_delete_has_snapshot(self): def fake_safe_delete_from_backend(self, *args, **kwargs): raise store.exceptions.HasSnapshot() self.mock_object(self.store_utils, 'safe_delete_from_backend', fake_safe_delete_from_backend) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, request, UUID1) def test_delete_to_unallowed_status(self): # from deactivated to pending-delete self.config(delayed_delete=True) request = unit_test_utils.get_fake_request(is_admin=True) self.action_controller.deactivate(request, UUID1) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, request, UUID1) def test_delete_uploading_status_image(self): """Ensure uploading image is deleted (LP bug #1733289) Ensure image stuck in uploading state is deleted (LP bug #1836140) """ request = unit_test_utils.get_fake_request(is_admin=True) image = self.db.image_create(request.context, {'status': 'uploading'}) image_id = image['id'] with mock.patch.object(os.path, 'exists') as mock_exists: mock_exists.return_value = True with mock.patch.object(os, "unlink") as mock_unlik: 
self.controller.delete(request, image_id) self.assertEqual(1, mock_exists.call_count) self.assertEqual(1, mock_unlik.call_count) # Ensure that image is deleted image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_deletion_of_staging_data_failed(self): """Ensure uploading image is deleted (LP bug #1733289) Ensure image stuck in uploading state is deleted (LP bug #1836140) """ request = unit_test_utils.get_fake_request(is_admin=True) image = self.db.image_create(request.context, {'status': 'uploading'}) image_id = image['id'] with mock.patch.object(os.path, 'exists') as mock_exists: mock_exists.return_value = False with mock.patch.object(os, "unlink") as mock_unlik: self.controller.delete(request, image_id) self.assertEqual(1, mock_exists.call_count) self.assertEqual(0, mock_unlik.call_count) # Ensure that image is deleted image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_delete_from_store_no_multistore(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_from_store, request, "the IDs should", "not matter") def test_index_with_invalid_marker(self): fake_uuid = str(uuid.uuid4()) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=fake_uuid) def test_invalid_locations_op_pos(self): pos = self.controller._get_locations_op_pos(None, 2, True) self.assertIsNone(pos) pos = self.controller._get_locations_op_pos('1', None, True) self.assertIsNone(pos) @mock.patch('glance.db.simple.api.image_set_property_atomic') @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') @mock.patch.object(glance.domain.TaskExecutorFactory, 'new_task_executor') @mock.patch('glance.api.common.get_thread_pool') @mock.patch('glance.quota.keystone.enforce_image_size_total') def test_image_import(self, mock_enforce, mock_gtp, mock_nte, mock_nt, mock_spa): request = unit_test_utils.get_fake_request() image = FakeImage(status='uploading') with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = image output = self.controller.import_image( request, UUID4, {'method': {'name': 'glance-direct'}}) self.assertEqual(UUID4, output) # Make sure we checked quota mock_enforce.assert_called_once_with(request.context, request.context.project_id) # Make sure we set the lock on the image mock_spa.assert_called_once_with(UUID4, 'os_glance_import_task', mock_nt.return_value.task_id) # Make sure we grabbed a thread pool, and that we asked it # to spawn the task's run method with it. mock_gtp.assert_called_once_with('tasks_pool') mock_gtp.return_value.spawn.assert_called_once_with( mock_nt.return_value.run, mock_nte.return_value) @mock.patch.object(glance.domain.TaskFactory, 'new_task') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_image_import_not_allowed(self, mock_get, mock_new_task): # NOTE(danms): FakeImage is owned by utils.TENANT1. 
Try to do the # import as TENANT2 and we should get an HTTPForbidden enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "modify_image": "'{0}':%(owner)s".format(TENANT2) }) request = unit_test_utils.get_fake_request() self.controller.policy = enforcer mock_get.return_value = FakeImage(status='uploading') self.assertRaises(webob.exc.HTTPForbidden, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) # NOTE(danms): Make sure we failed early and never even created # a task mock_new_task.assert_not_called() @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') @mock.patch('glance.quota.keystone.enforce_image_size_total') def test_image_import_quota_fail(self, mock_enforce, mock_get): request = unit_test_utils.get_fake_request() mock_get.return_value = FakeImage(status='uploading') mock_enforce.side_effect = exception.LimitExceeded('test') self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) @mock.patch('glance.db.simple.api.image_set_property_atomic') @mock.patch('glance.context.RequestContext.elevated') @mock.patch.object(glance.domain.TaskFactory, 'new_task') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_image_import_copy_allowed_by_policy(self, mock_get, mock_new_task, mock_elevated, mock_spa, allowed=True): # NOTE(danms): FakeImage is owned by utils.TENANT1. Try to do the # import as TENANT2, but with a policy exception request = unit_test_utils.get_fake_request(tenant=TENANT2) mock_get.return_value = FakeImage(status='active', locations=[]) self.policy.rules = {'copy_image': allowed} req_body = {'method': {'name': 'copy-image'}, 'stores': ['cheap']} with mock.patch.object( self.controller.gateway, 'get_task_executor_factory', side_effect=self.controller.gateway.get_task_executor_factory ) as mock_tef: self.controller.import_image(request, UUID4, req_body) # Make sure we passed an admin context to our task executor factory mock_tef.assert_called_once_with( request.context, admin_context=mock_elevated.return_value) expected_input = {'image_id': UUID4, 'import_req': mock.ANY, 'backend': mock.ANY} mock_new_task.assert_called_with(task_type='api_image_import', owner=TENANT2, task_input=expected_input, image_id=UUID4, user_id=request.context.user_id, request_id=request.context.request_id) def test_image_import_copy_not_allowed_by_policy(self): # Make sure that if the policy check fails, we fail a copy-image with # Forbidden self.assertRaises(webob.exc.HTTPForbidden, self.test_image_import_copy_allowed_by_policy, allowed=False) @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_image_import_locked(self, mock_get): task = test_tasks_resource._db_fixture(test_tasks_resource.UUID1, status='pending') self.db.task_create(None, task) image = FakeImage(status='uploading') # Image is locked with a valid task that has not aged out, so # the lock will not be busted. 
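# NOTE: for orientation, the "lock" in these tests is nothing more than
# the os_glance_import_task extra property holding the owning task's id.
# A minimal sketch of the convention, based on the simple-db atomic
# helpers mocked elsewhere in this class:
#
#     image_set_property_atomic(image_id, 'os_glance_import_task', tid)
#         # raises Duplicate if another task already holds the lock
#     image_delete_property_atomic(image_id, 'os_glance_import_task', tid)
#         # raises NotFound if the lock no longer has that exact value
#
# Here the holder is a live 'pending' task, so the API must refuse.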
image.extra_properties['os_glance_import_task'] = task['id'] mock_get.return_value = image request = unit_test_utils.get_fake_request(tenant=TENANT1) req_body = {'method': {'name': 'glance-direct'}} exc = self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID1, req_body) self.assertEqual('Image has active task', str(exc)) @mock.patch('glance.db.simple.api.image_set_property_atomic') @mock.patch('glance.db.simple.api.image_delete_property_atomic') @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_image_import_locked_by_reaped_task(self, mock_get, mock_nt, mock_dpi, mock_spi): image = FakeImage(status='uploading') # Image is locked by some other task that TaskRepo will not find image.extra_properties['os_glance_import_task'] = 'missing' mock_get.return_value = image request = unit_test_utils.get_fake_request(tenant=TENANT1) req_body = {'method': {'name': 'glance-direct'}} mock_nt.return_value.task_id = 'mytask' self.controller.import_image(request, UUID1, req_body) # We should have atomically deleted the missing task lock mock_dpi.assert_called_once_with(image.id, 'os_glance_import_task', 'missing') # We should have atomically grabbed the lock with our task id mock_spi.assert_called_once_with(image.id, 'os_glance_import_task', 'mytask') @mock.patch.object(glance.notifier.ImageRepoProxy, 'save') @mock.patch('glance.db.simple.api.image_set_property_atomic') @mock.patch('glance.db.simple.api.image_delete_property_atomic') @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_image_import_locked_by_bustable_task(self, mock_get, mock_nt, mock_dpi, mock_spi, mock_save, task_status='processing'): if task_status == 'processing': # NOTE(danms): Only set task_input on one of the tested # states to make sure we don't choke on a task without # some of the data set yet. task_input = {'backend': ['store2']} else: task_input = {} task = test_tasks_resource._db_fixture( test_tasks_resource.UUID1, status=task_status, input=task_input) self.db.task_create(None, task) image = FakeImage(status='uploading') # Image is locked by a task in 'processing' state image.extra_properties['os_glance_import_task'] = task['id'] image.extra_properties['os_glance_importing_to_stores'] = 'store2' mock_get.return_value = image request = unit_test_utils.get_fake_request(tenant=TENANT1) req_body = {'method': {'name': 'glance-direct'}} # Task has only been running for ten minutes time_fixture = fixture.TimeFixture(task['updated_at'] + datetime.timedelta(minutes=10)) self.useFixture(time_fixture) mock_nt.return_value.task_id = 'mytask' # Task holds the lock, API refuses to bust it self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID1, req_body) mock_dpi.assert_not_called() mock_spi.assert_not_called() mock_nt.assert_not_called() # Fast forward to 90 minutes from now time_fixture.advance_time_delta(datetime.timedelta(minutes=90)) self.controller.import_image(request, UUID1, req_body) # API deleted the other task's lock and locked it for us mock_dpi.assert_called_once_with(image.id, 'os_glance_import_task', task['id']) mock_spi.assert_called_once_with(image.id, 'os_glance_import_task', 'mytask') # If we stored task_input with information about the stores # and thus triggered the cleanup code, make sure that cleanup # happened here. 
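# NOTE: a worked example of the cleanup verified below, based on the
# fixtures above: the stale task recorded task_input['backend'] ==
# ['store2'] and the image carried
#     os_glance_importing_to_stores = 'store2'
# so busting the lock should also scrub 'store2' from that
# comma-separated progress list.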
if task_status == 'processing': self.assertNotIn('store2', image.extra_properties[ 'os_glance_importing_to_stores']) def test_image_import_locked_by_bustable_terminal_task_failure(self): # Make sure we don't fail with a task status transition error self.test_image_import_locked_by_bustable_task(task_status='failure') def test_image_import_locked_by_bustable_terminal_task_success(self): # Make sure we don't fail with a task status transition error self.test_image_import_locked_by_bustable_task(task_status='success') def test_cleanup_stale_task_progress(self): img_repo = mock.MagicMock() image = mock.MagicMock() task = mock.MagicMock() # No backend info from the old task means no action task.task_input = {} image.extra_properties = {} self.controller._cleanup_stale_task_progress(img_repo, image, task) img_repo.save.assert_not_called() # If we have info but no stores, no action task.task_input = {'backend': []} self.controller._cleanup_stale_task_progress(img_repo, image, task) img_repo.save.assert_not_called() # If task had stores, but image does not have those stores in # the lists, no action task.task_input = {'backend': ['store1', 'store2']} self.controller._cleanup_stale_task_progress(img_repo, image, task) img_repo.save.assert_not_called() # If the image has stores in the lists, but not the ones we care # about, make sure they are not disturbed image.extra_properties = {'os_glance_failed_import': 'store3'} self.controller._cleanup_stale_task_progress(img_repo, image, task) img_repo.save.assert_not_called() # Only if the image has stores that relate to our old task should # we take action, and only on those stores. image.extra_properties = { 'os_glance_importing_to_stores': 'foo,store1,bar', 'os_glance_failed_import': 'foo,store2,bar', } self.controller._cleanup_stale_task_progress(img_repo, image, task) img_repo.save.assert_called_once_with(image) self.assertEqual({'os_glance_importing_to_stores': 'foo,bar', 'os_glance_failed_import': 'foo,bar'}, image.extra_properties) def test_bust_import_lock_race_to_delete(self): image_repo = mock.MagicMock() task_repo = mock.MagicMock() image = mock.MagicMock() task = mock.MagicMock(id='foo') # Simulate a race where we tried to bust a specific lock and # someone else already had, and/or re-locked it image_repo.delete_property_atomic.side_effect = exception.NotFound self.assertRaises(exception.Conflict, self.controller._bust_import_lock, image_repo, task_repo, image, task, task.id) def test_enforce_lock_log_not_bustable(self, task_status='processing'): task = test_tasks_resource._db_fixture( test_tasks_resource.UUID1, status=task_status) self.db.task_create(None, task) request = unit_test_utils.get_fake_request(tenant=TENANT1) image = FakeImage() image.extra_properties['os_glance_import_task'] = task['id'] # Freeze time to make this repeatable time_fixture = fixture.TimeFixture(task['updated_at'] + datetime.timedelta(minutes=55)) self.useFixture(time_fixture) expected_expire = 300 if task_status == 'pending': # NOTE(danms): Tasks in 'pending' get double the expiry time, # so we'd be expecting an extra hour here. 
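# NOTE: the arithmetic, assuming the 60-minute lock window implied by
# expected_expire above: the task is frozen at 55 minutes old, so
# 60 * 60 - 55 * 60 == 300 seconds remain; 'pending' doubles the
# window, hence the extra 3600 seconds added below.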
expected_expire += 3600 with mock.patch.object(glance.api.v2.images, 'LOG') as mock_log: self.assertRaises(exception.Conflict, self.controller._enforce_import_lock, request, image) mock_log.warning.assert_called_once_with( 'Image %(image)s has active import task %(task)s in ' 'status %(status)s; lock remains valid for %(expire)i ' 'more seconds', {'image': image.id, 'task': task['id'], 'status': task_status, 'expire': expected_expire}) def test_enforce_lock_pending_takes_longer(self): self.test_enforce_lock_log_not_bustable(task_status='pending') def test_delete_encryption_key_no_encryption_key(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties={}) self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is still there key = self.controller._key_manager.get(request.context, fake_encryption_key) self.assertEqual(fake_encryption_key, key._id) def test_delete_encryption_key_no_deletion_policy(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) props = { 'cinder_encryption_key_id': fake_encryption_key, } image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is still there key = self.controller._key_manager.get(request.context, fake_encryption_key) self.assertEqual(fake_encryption_key, key._id) def test_delete_encryption_key_do_not_delete(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) props = { 'cinder_encryption_key_id': fake_encryption_key, 'cinder_encryption_key_deletion_policy': 'do_not_delete', } image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is still there key = self.controller._key_manager.get(request.context, fake_encryption_key) self.assertEqual(fake_encryption_key, key._id) def test_delete_encryption_key_forbidden(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) props = { 'cinder_encryption_key_id': fake_encryption_key, 'cinder_encryption_key_deletion_policy': 'on_image_deletion', } image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) with mock.patch.object(self.controller._key_manager, 'delete', side_effect=castellan_exception.Forbidden): self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is still there key = 
self.controller._key_manager.get(request.context, fake_encryption_key) self.assertEqual(fake_encryption_key, key._id) def test_delete_encryption_key_not_found(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) props = { 'cinder_encryption_key_id': fake_encryption_key, 'cinder_encryption_key_deletion_policy': 'on_image_deletion', } image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) with mock.patch.object(self.controller._key_manager, 'delete', side_effect=castellan_exception.ManagedObjectNotFoundError): # noqa self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is still there key = self.controller._key_manager.get(request.context, fake_encryption_key) self.assertEqual(fake_encryption_key, key._id) def test_delete_encryption_key_error(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) props = { 'cinder_encryption_key_id': fake_encryption_key, 'cinder_encryption_key_deletion_policy': 'on_image_deletion', } image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) with mock.patch.object(self.controller._key_manager, 'delete', side_effect=castellan_exception.KeyManagerError): # noqa self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is still there key = self.controller._key_manager.get(request.context, fake_encryption_key) self.assertEqual(fake_encryption_key, key._id) def test_delete_encryption_key(self): request = unit_test_utils.get_fake_request() fake_encryption_key = self.controller._key_manager.store( request.context, mock.Mock()) props = { 'cinder_encryption_key_id': fake_encryption_key, 'cinder_encryption_key_deletion_policy': 'on_image_deletion', } image = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) self.controller._delete_encryption_key(request.context, image) # Make sure the encryption key is gone self.assertRaises(castellan_exception.ManagedObjectNotFoundError, self.controller._key_manager.get, request.context, fake_encryption_key) def test_delete_no_encryption_key_id(self): request = unit_test_utils.get_fake_request() extra_props = { 'cinder_encryption_key_deletion_policy': 'on_image_deletion', } created_image = self.controller.create(request, image={'name': 'image-1'}, extra_properties=extra_props, tags=[]) image_id = created_image.image_id self.controller.delete(request, image_id) # Ensure that image is deleted image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_delete_invalid_encryption_key_id(self): request = unit_test_utils.get_fake_request() extra_props = { 'cinder_encryption_key_id': 'invalid', 'cinder_encryption_key_deletion_policy': 'on_image_deletion', } created_image = self.controller.create(request, 
image={'name': 'image-1'}, extra_properties=extra_props, tags=[]) image_id = created_image.image_id self.controller.delete(request, image_id) # Ensure that image is deleted image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_delete_invalid_encryption_key_deletion_policy(self): request = unit_test_utils.get_fake_request() extra_props = { 'cinder_encryption_key_deletion_policy': 'invalid', } created_image = self.controller.create(request, image={'name': 'image-1'}, extra_properties=extra_props, tags=[]) image_id = created_image.image_id self.controller.delete(request, image_id) # Ensure that image is deleted image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') def test_add_location(self, mock_task): # Test add location without service role but with http store self.config(do_secure_hash=True) self.config(default_store='http', group='glance_store') image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() url = '%s/fake_location_1' % BASE_URI task_input = { "image_id": image_id, "loc_url": url, "validation_data": {} } req_body = {'url': url} self.controller.add_location(request, image_id, req_body) mock_task.assert_called_with(task_type='location_import', owner=TENANT1, task_input=task_input, image_id=image_id, user_id=request.context.user_id, request_id=request.context.request_id) @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') def test_add_location_with_service_role(self, mock_task): # Need to make sure 'http' store is not enabled self.config(stores='file', group='glance_store') self.config(do_secure_hash=True) image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request(roles=['service']) url = '%s/fake_location_1' % BASE_URI task_input = { "image_id": image_id, "loc_url": url, "validation_data": {} } req_body = {'url': url} self.controller.add_location(request, image_id, req_body) mock_task.assert_called_with(task_type='location_import', owner=TENANT1, task_input=task_input, image_id=image_id, user_id=request.context.user_id, request_id=request.context.request_id) @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_add_location_locked(self, mock_get): task = test_tasks_resource._db_fixture(test_tasks_resource.UUID1, status='pending') self.db.task_create(None, task) image = FakeImage(status='queued') # Image is locked with a valid task that has not aged out, so # the lock will not be busted. 
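# NOTE: add_location shares the os_glance_import_task lock with image
# import, so this mirrors test_image_import_locked above: a live
# lock-holding task must surface as HTTPConflict ('Image has active
# task') rather than being busted.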
image.extra_properties['os_glance_import_task'] = task['id'] mock_get.return_value = image request = unit_test_utils.get_fake_request(tenant=TENANT1) url = '%s/fake_location_1' % BASE_URI req_body = {'url': url} exc = self.assertRaises(webob.exc.HTTPConflict, self.controller.add_location, request, UUID1, req_body) self.assertEqual('Image has active task', str(exc)) @mock.patch.object(glance.notifier.ImageRepoProxy, 'save') @mock.patch('glance.db.simple.api.image_set_property_atomic') @mock.patch('glance.db.simple.api.image_delete_property_atomic') @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') @mock.patch.object(glance.notifier.ImageRepoProxy, 'get') def test_add_location_locked_by_bustable_task(self, mock_get, mock_nt, mock_dpi, mock_spi, mock_save, task_status='processing'): if task_status == 'processing': # NOTE(danms): Only set task_input on one of the tested # states to make sure we don't choke on a task without # some of the data set yet. task_input = {'backend': ['store2']} else: task_input = {} task = test_tasks_resource._db_fixture( test_tasks_resource.UUID1, status=task_status, input=task_input) self.db.task_create(None, task) image = FakeImage(status='queued') # Image is locked by a task in 'processing' state image.extra_properties['os_glance_import_task'] = task['id'] image.extra_properties['os_glance_importing_to_stores'] = 'store2' mock_get.return_value = image request = unit_test_utils.get_fake_request(tenant=TENANT1) url = '%s/fake_location_1' % BASE_URI req_body = {'url': url} # Task has only been running for ten minutes time_fixture = fixture.TimeFixture(task['updated_at'] + datetime.timedelta(minutes=10)) self.useFixture(time_fixture) mock_nt.return_value.task_id = 'mytask' # Task holds the lock, API refuses to bust it self.assertRaises(webob.exc.HTTPConflict, self.controller.add_location, request, UUID1, req_body) mock_dpi.assert_not_called() mock_spi.assert_not_called() mock_nt.assert_not_called() # Fast forward to 90 minutes from now time_fixture.advance_time_delta(datetime.timedelta(minutes=90)) self.controller.add_location(request, UUID1, req_body) # API deleted the other task's lock and locked it for us mock_dpi.assert_called_once_with(image.id, 'os_glance_import_task', task['id']) mock_spi.assert_called_once_with(image.id, 'os_glance_import_task', 'mytask') # If previous operation is still in processing, new operation # is not allowed mock_nt.return_value.task_id = 'mytask1' mock_spi.side_effect = exception.Duplicate request = unit_test_utils.get_fake_request(tenant=TENANT2) url = '%s/fake_location_2' % BASE_URI req_body = {'url': url} self.assertRaises(webob.exc.HTTPConflict, self.controller.add_location, request, UUID1, req_body) # If we stored task_input with information about the stores # and thus triggered the cleanup code, make sure that cleanup # happened here. 
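# NOTE: the Duplicate branch above is the losing side of the atomic
# lock grab: image_set_property_atomic raising Duplicate means another
# task re-locked the image first, and the API reports HTTPConflict
# instead of clobbering the new holder's lock.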
if task_status == 'processing': self.assertNotIn('store2', image.extra_properties[ 'os_glance_importing_to_stores']) def test_add_location_locked_by_bustable_terminal_task_failure(self): # Make sure we don't fail with a task status transition error self.test_add_location_locked_by_bustable_task(task_status='failure') def test_add_location_locked_by_bustable_terminal_task_success(self): # Make sure we don't fail with a task status transition error self.test_add_location_locked_by_bustable_task(task_status='success') def test_add_location_by_non_owner(self): image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "add_location": "'{0}':%(owner)s".format(TENANT2) }) self.controller.policy = enforcer req_body = {'url': '%s/fake_location_1' % BASE_URI} self.assertRaisesRegex( webob.exc.HTTPForbidden, 'You are not authorized to complete add_image_location action.', self.controller.add_location, request, image_id, req_body) def test_add_location_without_service_role(self): # Need to make sure 'http' store is not enabled self.config(stores='file', group='glance_store') image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request(roles=['admin', 'member']) req_body = {'url': 'file://%s/%s' % (self.test_dir, UUID7)} self.assertRaisesRegex( webob.exc.HTTPForbidden, 'http store must be enabled to use location API by normal user.', self.controller.add_location, request, image_id, req_body) def test_add_location_to_invalid_image(self): image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() req_body = {'url': '%s/fake_location_1' % BASE_URI} self.assertRaisesRegex( webob.exc.HTTPNotFound, 'No image found with ID .*', self.controller.add_location, request, str(uuid.uuid4()), req_body) def test_add_location_to_active_image(self): image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, name='1', disk_format='raw', container_format='bare', status='active'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() req_body = {'url': '%s/fake_location_1' % BASE_URI} self.assertRaises( webob.exc.HTTPConflict, self.controller.add_location, request, image_id, req_body) @mock.patch.object(store, 'get_size_from_backend') def test_add_location_with_invalid_validation_data( self, mock_get_size): mock_get_size.return_value = 1 image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=None, os_hash_algo=None, os_hash_value=None, name='1', disk_format='raw', container_format='bare', status='queued'), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request() validation_data = { 'os_hash_algo': 'sha256', 'os_hash_value': MULTIHASH1, } req_body = { 'url': '%s/fake_location_1' % BASE_URI, 'validation_data': validation_data } self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'os_hash_value: .* is not the correct size', self.controller.add_location, request, image_id, req_body) 
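# NOTE: for reference while reading the remaining cases (relying only
# on stdlib hashlib): a valid sha512 os_hash_value is 128 hex chars,
# i.e. len(hashlib.sha512(b'').hexdigest()) == 128, so a wrong-length
# value, an unknown algo, a non-hex string, or a short hex string must
# each be rejected with HTTPBadRequest.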
validation_data = { 'os_hash_algo': 'sha123', 'os_hash_value': MULTIHASH1, } req_body = { 'url': '%s/fake_location_1' % BASE_URI, 'validation_data': validation_data } self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'unsupported hash type .*', self.controller.add_location, request, image_id, req_body) validation_data = { 'os_hash_algo': 'sha512', 'os_hash_value': 'not a hex value', } req_body = { 'url': '%s/fake_location_1' % BASE_URI, 'validation_data': validation_data } self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'os_hash_value .* is not a valid hexadecimal value', self.controller.add_location, request, image_id, req_body) validation_data = { 'os_hash_algo': 'sha512', 'os_hash_value': '0123456789abcdef', } req_body = { 'url': '%s/fake_location_1' % BASE_URI, 'validation_data': validation_data } self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'os_hash_value: .* is not the correct size', self.controller.add_location, request, image_id, req_body) def test_get_locations_by_owner_or_admin(self): url = '%s/fake_location_1' % BASE_URI image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', status='active', locations=[{'url': url, 'metadata': {}, 'status': 'active'}]), ] self.db.image_create(None, self.images[0]) enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "fetch_image_location": "role:service" }) self.controller.policy = enforcer request = unit_test_utils.get_fake_request(roles=['admin', 'member']) self.assertRaises( webob.exc.HTTPForbidden, self.controller.get_locations, request, image_id) def test_get_locations(self): image_id = str(uuid.uuid4()) url = '%s/fake_location_1' % BASE_URI self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', status='active', locations=[{'url': url, 'metadata': {}, 'status': 'active'}]), ] self.db.image_create(None, self.images[0]) enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "fetch_image_location": "role:service" }) self.controller.policy = enforcer request = unit_test_utils.get_fake_request(roles=['service']) output = self.controller.get_locations(request, image_id) self.assertEqual(1, len(output)) self.assertEqual(url, output[0]['url']) def test_get_locations_of_non_existing_image(self): url = '%s/fake_location_1' % BASE_URI image_id = str(uuid.uuid4()) self.images = [ _db_fixture(image_id, owner=TENANT1, checksum=CHKSUM, name='1', disk_format='raw', container_format='bare', status='active', locations=[{'url': url, 'metadata': {}, 'status': 'active'}]), ] self.db.image_create(None, self.images[0]) request = unit_test_utils.get_fake_request(roles=['member']) self.assertRaisesRegex( webob.exc.HTTPNotFound, 'No image found with ID .*', self.controller.get_locations, request, str(uuid.uuid4())) class TestImagesControllerPolicies(base.IsolatedUnitTest): def setUp(self): super(TestImagesControllerPolicies, self).setUp() self.db = unit_test_utils.FakeDB() self.policy = unit_test_utils.FakePolicyEnforcer() self.controller = glance.api.v2.images.ImagesController(self.db, self.policy) store = unit_test_utils.FakeStoreAPI() self.store_utils = unit_test_utils.FakeStoreUtils(store) def test_index_unauthorized(self): rules = {"get_images": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request) def test_show_unauthorized(self): # Make sure that if policy says we 
can't see the image that we get a # NotFound result instead of a Forbidden one. rules = {"get_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, image_id=UUID2) def test_create_image_unauthorized(self): rules = {"add_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() image = {'name': 'image-1'} extra_properties = {} tags = [] self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image, extra_properties, tags) def test_create_public_image_unauthorized(self): rules = {"publicize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() image = {'name': 'image-1', 'visibility': 'public'} extra_properties = {} tags = [] self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image, extra_properties, tags) def test_create_community_image_unauthorized(self): rules = {"communitize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() image = {'name': 'image-c1', 'visibility': 'community'} extra_properties = {} tags = [] self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image, extra_properties, tags) def test_update_unauthorized(self): rules = {"modify_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['name'], 'value': 'image-2'}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_publicize_image_unauthorized(self): rules = {"publicize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['visibility'], 'value': 'public'}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_communitize_image_unauthorized(self): rules = {"communitize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['visibility'], 'value': 'community'}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_depublicize_image_unauthorized(self): rules = {"publicize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['visibility'], 'value': 'private'}] output = self.controller.update(request, UUID1, changes) self.assertEqual('private', output.visibility) def test_update_decommunitize_image_unauthorized(self): rules = {"communitize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['visibility'], 'value': 'private'}] output = self.controller.update(request, UUID1, changes) self.assertEqual('private', output.visibility) def test_update_get_image_location_unauthorized(self): rules = {"get_image_location": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_set_image_location_unauthorized(self): def fake_delete_image_location_from_backend(self, *args, **kwargs): pass rules = {"set_image_location": False} self.policy.set_rules(rules) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = 
unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_delete_image_location_unauthorized(self): rules = {"delete_image_location": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_delete_unauthorized(self): rules = {"delete_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID1) def test_add_location_unauthorized(self): rules = {"add_image_location": False} self.policy.set_rules(rules) body = {'url': '%s/fake_location' % BASE_URI} request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.add_location, request, UUID1, body) def test_get_locations_unauthorized(self): rules = {"fetch_image_location": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get_locations, request, UUID1) class TestImagesDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializer, self).setUp() self.deserializer = glance.api.v2.images.RequestDeserializer() def test_create_minimal(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({}) output = self.deserializer.create(request) expected = {'image': {}, 'extra_properties': {}, 'tags': []} self.assertEqual(expected, output) def test_create_invalid_id(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'id': 'gabe'}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_id_to_image_id(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'id': UUID4}) output = self.deserializer.create(request) expected = {'image': {'image_id': UUID4}, 'extra_properties': {}, 'tags': []} self.assertEqual(expected, output) def test_create_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_full(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'id': UUID3, 'name': 'image-1', 'visibility': 'public', 'tags': ['one', 'two'], 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'foo': 'bar', 'protected': True, }) output = self.deserializer.create(request) properties = { 'image_id': UUID3, 'name': 'image-1', 'visibility': 'public', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'protected': True, } self.maxDiff = None expected = {'image': properties, 'extra_properties': {'foo': 'bar'}, 'tags': ['one', 'two']} self.assertEqual(expected, output) def test_create_invalid_property_key(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'id': UUID3, 'name': 'image-1', 'visibility': 'public', 'tags': ['one', 'two'], 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'f' * 256: 'bar', 'protected': True, }) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_readonly_attributes_forbidden(self): bodies = [ 
{'direct_url': 'http://example.com'}, {'self': 'http://example.com'}, {'file': 'http://example.com'}, {'schema': 'http://example.com'}, {'os_glance_foo': 'foo'}, ] for body in bodies: request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPForbidden, self.deserializer.create, request) def _get_fake_patch_request(self, content_type_minor_version=1): request = unit_test_utils.get_fake_request() template = 'application/openstack-images-v2.%d-json-patch' request.content_type = template % content_type_minor_version return request def test_update_empty_body(self): request = self._get_fake_patch_request() request.body = jsonutils.dump_as_bytes([]) output = self.deserializer.update(request) expected = {'changes': []} self.assertEqual(expected, output) def test_update_unsupported_content_type(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/json-patch' request.body = jsonutils.dump_as_bytes([]) try: self.deserializer.update(request) except webob.exc.HTTPUnsupportedMediaType as e: # desired result, but must have correct Accept-Patch header accept_patch = ['application/openstack-images-v2.1-json-patch', 'application/openstack-images-v2.0-json-patch'] expected = ', '.join(sorted(accept_patch)) self.assertEqual(expected, e.headers['Accept-Patch']) else: self.fail('Did not raise HTTPUnsupportedMediaType') def test_update_body_not_a_list(self): bodies = [ {'op': 'add', 'path': '/someprop', 'value': 'somevalue'}, 'just some string', 123, True, False, None, ] for body in bodies: request = self._get_fake_patch_request() request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_invalid_changes(self): changes = [ ['a', 'list', 'of', 'stuff'], 'just some string', 123, True, False, None, {'op': 'invalid', 'path': '/name', 'value': 'fedora'} ] for change in changes: request = self._get_fake_patch_request() request.body = jsonutils.dump_as_bytes([change]) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_invalid_validation_data(self): request = self._get_fake_patch_request() changes = [{ 'op': 'add', 'path': '/locations/0', 'value': { 'url': 'http://localhost/fake', 'metadata': {}, } }] changes[0]['value']['validation_data'] = { 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1, 'checksum': CHKSUM, } request.body = jsonutils.dump_as_bytes(changes) self.deserializer.update(request) changes[0]['value']['validation_data'] = { 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1, 'checksum': CHKSUM, 'bogus_key': 'bogus_value', } request.body = jsonutils.dump_as_bytes(changes) # NOTE: bogus_key is not permitted by the validation_data schema, # so this must be rejected self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Additional properties are not allowed', self.deserializer.update, request) changes[0]['value']['validation_data'] = { 'checksum': CHKSUM, } request.body = jsonutils.dump_as_bytes(changes) self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'os_hash.* is a required property', self.deserializer.update, request) def test_update(self): request = self._get_fake_patch_request() body = [ {'op': 'replace', 'path': '/name', 'value': 'fedora'}, {'op': 'replace', 'path': '/tags', 'value': ['king', 'kong']}, {'op': 'replace', 'path': '/foo', 'value': 'bar'}, {'op': 'add', 'path': '/bebim', 'value': 'bap'}, {'op': 'remove', 'path': '/sparks'}, {'op': 'add', 'path': '/locations/-', 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'op': 'add', 'path': '/locations/10', 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'op': 'remove', 'path': '/locations/2'}, {'op': 'replace', 'path': '/locations', 'value': []}, {'op': 'replace', 'path': 
'/locations', 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ] request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 10, 'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']}, {'json_schema_version': 10, 'op': 'replace', 'path': ['foo'], 'value': 'bar'}, {'json_schema_version': 10, 'op': 'add', 'path': ['bebim'], 'value': 'bap'}, {'json_schema_version': 10, 'op': 'remove', 'path': ['sparks']}, {'json_schema_version': 10, 'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'json_schema_version': 10, 'op': 'add', 'path': ['locations', '10'], 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'json_schema_version': 10, 'op': 'remove', 'path': ['locations', '2']}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': []}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ]} self.assertEqual(expected, output) def test_update_v2_0_compatibility(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [ {'replace': '/name', 'value': 'fedora'}, {'replace': '/tags', 'value': ['king', 'kong']}, {'replace': '/foo', 'value': 'bar'}, {'add': '/bebim', 'value': 'bap'}, {'remove': '/sparks'}, {'add': '/locations/-', 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'add': '/locations/10', 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'remove': '/locations/2'}, {'replace': '/locations', 'value': []}, {'replace': '/locations', 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ] request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 4, 'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'json_schema_version': 4, 'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']}, {'json_schema_version': 4, 'op': 'replace', 'path': ['foo'], 'value': 'bar'}, {'json_schema_version': 4, 'op': 'add', 'path': ['bebim'], 'value': 'bap'}, {'json_schema_version': 4, 'op': 'remove', 'path': ['sparks']}, {'json_schema_version': 4, 'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'json_schema_version': 4, 'op': 'add', 'path': ['locations', '10'], 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'json_schema_version': 4, 'op': 'remove', 'path': ['locations', '2']}, {'json_schema_version': 4, 'op': 'replace', 'path': ['locations'], 'value': []}, {'json_schema_version': 4, 'op': 'replace', 'path': ['locations'], 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ]} self.assertEqual(expected, output) def test_update_base_attributes(self): request = self._get_fake_patch_request() body = [ {'op': 'replace', 'path': '/name', 'value': 'fedora'}, {'op': 'replace', 'path': '/visibility', 'value': 'public'}, {'op': 'replace', 'path': '/tags', 'value': ['king', 'kong']}, {'op': 'replace', 'path': '/protected', 'value': True}, {'op': 'replace', 'path': '/container_format', 'value': 'bare'}, {'op': 'replace', 'path': '/disk_format', 'value': 'raw'}, {'op': 'replace', 'path': '/min_ram', 'value': 128}, {'op': 'replace', 'path': '/min_disk', 'value': 10}, 
{'op': 'replace', 'path': '/locations', 'value': []}, {'op': 'replace', 'path': '/locations', 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]} ] request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 10, 'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['visibility'], 'value': 'public'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']}, {'json_schema_version': 10, 'op': 'replace', 'path': ['protected'], 'value': True}, {'json_schema_version': 10, 'op': 'replace', 'path': ['container_format'], 'value': 'bare'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['disk_format'], 'value': 'raw'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['min_ram'], 'value': 128}, {'json_schema_version': 10, 'op': 'replace', 'path': ['min_disk'], 'value': 10}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': []}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]} ]} self.assertEqual(expected, output) def test_update_disallowed_attributes(self): samples = { 'direct_url': '/a/b/c/d', 'self': '/e/f/g/h', 'file': '/e/f/g/h/file', 'schema': '/i/j/k', } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPForbidden: pass # desired behavior else: self.fail("Updating %s did not result in HTTPForbidden" % key) def test_update_readonly_attributes(self): samples = { 'id': '00000000-0000-0000-0000-000000000000', 'status': 'active', 'checksum': 'abcdefghijklmnopqrstuvwxyz012345', 'os_hash_algo': 'supersecure', 'os_hash_value': 'a' * 32 + 'b' * 32 + 'c' * 32 + 'd' * 32, 'size': 9001, 'virtual_size': 9001, 'created_at': ISOTIME, 'updated_at': ISOTIME, } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPForbidden: pass # desired behavior else: self.fail("Updating %s did not result in HTTPForbidden" % key) def test_update_reserved_attributes(self): samples = { 'deleted': False, 'deleted_at': ISOTIME, 'os_glance_import_task': 'foo', 'os_glance_anything': 'bar', 'os_glance_': 'baz', 'os_glance': 'bat', } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPForbidden: pass # desired behavior else: self.fail("Updating %s did not result in HTTPForbidden" % key) def test_update_invalid_attributes(self): keys = [ 'noslash', '///twoslash', '/two/ /slash', '/ / ', '/trailingslash/', '/lone~tilde', '/trailingtilde~' ] for key in keys: request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '%s' % key, 'value': 'dummy'}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPBadRequest: pass # desired behavior else: self.fail("Updating %s did not result in HTTPBadRequest" % key) def test_update_pointer_encoding(self): 
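# NOTE: these samples exercise the RFC 6901 JSON-pointer escapes:
# '~1' decodes to '/' and '~0' decodes to '~', with '~1' replaced
# before '~0' so that '~01' yields the literal '~1' rather than '/'.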
samples = { '/keywith~1slash': ['keywith/slash'], '/keywith~0tilde': ['keywith~tilde'], '/tricky~01': ['tricky~1'], } for encoded, decoded in samples.items(): request = self._get_fake_patch_request() doc = [{'op': 'replace', 'path': '%s' % encoded, 'value': 'dummy'}] request.body = jsonutils.dump_as_bytes(doc) output = self.deserializer.update(request) self.assertEqual(decoded, output['changes'][0]['path']) def test_update_deep_limited_attributes(self): samples = { 'locations/1/2': [], } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPBadRequest: pass # desired behavior else: self.fail("Updating %s did not result in HTTPBadRequest" % key) def test_update_v2_1_missing_operations(self): request = self._get_fake_patch_request() body = [{'path': '/colburn', 'value': 'arcata'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_1_missing_value(self): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/colburn'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_1_missing_path(self): request = self._get_fake_patch_request() body = [{'op': 'replace', 'value': 'arcata'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_0_multiple_operations(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [{'replace': '/foo', 'add': '/bar', 'value': 'snore'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_0_missing_operations(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [{'value': 'arcata'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_0_missing_value(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [{'replace': '/colburn'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_index(self): marker = str(uuid.uuid4()) path = '/images?limit=1&marker=%s&member_status=pending' % marker request = unit_test_utils.get_fake_request(path) expected = {'limit': 1, 'marker': marker, 'sort_key': ['created_at'], 'sort_dir': ['desc'], 'member_status': 'pending', 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_with_filter(self): name = 'My Little Image' path = '/images?name=%s' % name request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) def test_index_strip_params_from_filters(self): name = 'My Little Image' path = '/images?name=%s' % name request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) self.assertEqual(1, len(output['filters'])) def test_index_with_many_filter(self): name = 'My Little Image' instance_id = str(uuid.uuid4()) path = ('/images?name=%(name)s&id=%(instance_id)s' % {'name': name, 'instance_id': instance_id}) request = 
unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) self.assertEqual(instance_id, output['filters']['id']) def test_index_with_filter_and_limit(self): name = 'My Little Image' path = '/images?name=%s&limit=1' % name request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) self.assertEqual(1, output['limit']) def test_index_non_integer_limit(self): request = unit_test_utils.get_fake_request('/images?limit=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_zero_limit(self): request = unit_test_utils.get_fake_request('/images?limit=0') expected = {'limit': 0, 'sort_key': ['created_at'], 'member_status': 'accepted', 'sort_dir': ['desc'], 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_negative_limit(self): request = unit_test_utils.get_fake_request('/images?limit=-1') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_fraction(self): request = unit_test_utils.get_fake_request('/images?limit=1.1') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_invalid_status(self): path = '/images?member_status=blah' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_marker(self): marker = str(uuid.uuid4()) path = '/images?marker=%s' % marker request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(marker, output.get('marker')) def test_index_marker_not_specified(self): request = unit_test_utils.get_fake_request('/images') output = self.deserializer.index(request) self.assertNotIn('marker', output) def test_index_limit_not_specified(self): request = unit_test_utils.get_fake_request('/images') output = self.deserializer.index(request) self.assertNotIn('limit', output) def test_index_sort_key_id(self): request = unit_test_utils.get_fake_request('/images?sort_key=id') output = self.deserializer.index(request) expected = { 'sort_key': ['id'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {} } self.assertEqual(expected, output) def test_index_multiple_sort_keys(self): request = unit_test_utils.get_fake_request('/images?' 'sort_key=name&' 'sort_key=size') output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {} } self.assertEqual(expected, output) def test_index_invalid_multiple_sort_keys(self): # blah is an invalid sort key request = unit_test_utils.get_fake_request('/images?' 
'sort_key=name&' 'sort_key=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_dir_asc(self): request = unit_test_utils.get_fake_request('/images?sort_dir=asc') output = self.deserializer.index(request) expected = { 'sort_key': ['created_at'], 'sort_dir': ['asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_multiple_sort_dirs(self): req_string = ('/images?sort_key=name&sort_dir=asc&' 'sort_key=id&sort_dir=desc') request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'id'], 'sort_dir': ['asc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_single_key_default_dir(self): req_string = '/images?sort=name' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_single_key_desc_dir(self): req_string = '/images?sort=name:desc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_default_dir(self): req_string = '/images?sort=name,size' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_asc_dir(self): req_string = '/images?sort=name:asc,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['asc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_different_dirs(self): req_string = '/images?sort=name:desc,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_optional_dir(self): req_string = '/images?sort=name:asc,size' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['asc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) req_string = '/images?sort=name,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) req_string = '/images?sort=name,id:asc,size' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'id', 'size'], 'sort_dir': ['desc', 'asc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) req_string = '/images?sort=name:asc,id,size:asc' request = 
unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'id', 'size'], 'sort_dir': ['asc', 'desc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_sort_wrong_sort_dirs_number(self): req_string = '/images?sort_key=name&sort_dir=asc&sort_dir=desc' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_dirs_fewer_than_keys(self): req_string = ('/images?sort_key=name&sort_dir=asc&sort_key=id&' 'sort_dir=asc&sort_key=created_at') request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_wrong_sort_dirs_number_without_key(self): req_string = '/images?sort_dir=asc&sort_dir=desc' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_private_key(self): request = unit_test_utils.get_fake_request('/images?sort_key=min_ram') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_key_invalid_value(self): # blah is an invalid sort key request = unit_test_utils.get_fake_request('/images?sort_key=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_dir_invalid_value(self): # foo is an invalid sort dir request = unit_test_utils.get_fake_request('/images?sort_dir=foo') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_new_sorting_syntax_invalid_request(self): # 'blah' is not a supported sorting key req_string = '/images?sort=blah' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) req_string = '/images?sort=name,blah' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) # 'foo' isn't a valid sort direction req_string = '/images?sort=name:foo' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) # 'asc:desc' isn't a valid sort direction req_string = '/images?sort=name:asc:desc' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_combined_sorting_syntax(self): req_string = '/images?sort_dir=name&sort=name' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_with_tag(self): path = '/images?tag=%s&tag=%s' % ('x86', '64bit') request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(sorted(['x86', '64bit']), sorted(output['filters']['tags'])) def test_image_import(self): # Bug 1754634: make sure that what's considered valid # is determined by the config option self.config(enabled_import_methods=['party-time']) request = unit_test_utils.get_fake_request() import_body = { "method": { "name": "party-time" } } request.body = jsonutils.dump_as_bytes(import_body) output = self.deserializer.import_image(request) expected = {"body": import_body} self.assertEqual(expected, output) def test_import_image_invalid_body(self): request = unit_test_utils.get_fake_request() import_body = { "method1": { "name": 
"glance-direct" } } request.body = jsonutils.dump_as_bytes(import_body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.import_image, request) def test_import_image_invalid_input(self): request = unit_test_utils.get_fake_request() import_body = { "method": { "abcd": "glance-direct" } } request.body = jsonutils.dump_as_bytes(import_body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.import_image, request) def test_import_image_with_all_stores_not_boolean(self): request = unit_test_utils.get_fake_request() import_body = { 'method': { 'name': 'glance-direct' }, 'all_stores': "true" } request.body = jsonutils.dump_as_bytes(import_body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.import_image, request) def test_import_image_with_allow_failure_not_boolean(self): request = unit_test_utils.get_fake_request() import_body = { 'method': { 'name': 'glance-direct' }, 'all_stores_must_succeed': "true" } request.body = jsonutils.dump_as_bytes(import_body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.import_image, request) def _get_request_for_method(self, method_name): request = unit_test_utils.get_fake_request() import_body = { "method": { "name": method_name } } request.body = jsonutils.dump_as_bytes(import_body) return request KNOWN_IMPORT_METHODS = ['glance-direct', 'web-download', 'glance-download'] def test_import_image_invalid_import_method(self): # Bug 1754634: make sure that what's considered valid # is determined by the config option. So put known bad # name in config, and known good name in request self.config(enabled_import_methods=['bad-method-name']) for m in self.KNOWN_IMPORT_METHODS: request = self._get_request_for_method(m) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.import_image, request) def test_add_location_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.add_location, request) def test_add_location(self): request = unit_test_utils.get_fake_request() body = { 'url': 'scheme1://path1', } request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.add_location(request) expected = {"body": body} self.assertEqual(expected, output) def test_add_location_with_invalid_body(self): request = self._get_fake_patch_request() body = { 'do_secure_hash': True } request.body = jsonutils.dump_as_bytes(body) self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Provided object does not match schema', self.deserializer.add_location, request) body = { 'url': 'scheme1://path2', 'validation_data': { 'os_hash_algo': 'sha123', 'os_hash_value': MULTIHASH1, } } request.body = jsonutils.dump_as_bytes(body) self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Provided object does not match schema', self.deserializer.add_location, request) body = { 'url': 'scheme1://path2', 'validation_data': { 'os_hash_algo': 'sha123', } } request.body = jsonutils.dump_as_bytes(body) self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Provided object does not match schema', self.deserializer.add_location, request) body = { 'url': 'scheme1://path2', 'validation_data': { 'os_hash_value': MULTIHASH1, } } request.body = jsonutils.dump_as_bytes(body) self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Provided object does not match schema', self.deserializer.add_location, request) body = { 'url': 'scheme1://path2', 'validation_data': { 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1, 'bogus_value': 'test' } } request.body = jsonutils.dump_as_bytes(body) 
self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Provided object does not match schema', self.deserializer.add_location, request) body = { 'url': 'scheme1://path2', 'validation_data': { 'bogus_value': 'test' } } request.body = jsonutils.dump_as_bytes(body) self.assertRaisesRegex( webob.exc.HTTPBadRequest, 'Provided object does not match schema', self.deserializer.add_location, request) body = { 'url': 'scheme1://path2', 'validation_data': { 'os_hash_algo': 'sha512', 'os_hash_value': MULTIHASH1, } } request.body = jsonutils.dump_as_bytes(body) self.deserializer.add_location(request) class TestImagesDeserializerWithExtendedSchema(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializerWithExtendedSchema, self).setUp() custom_image_properties = { 'pants': { 'type': 'string', 'enum': ['on', 'off'], }, } schema = glance.api.v2.images.get_schema(custom_image_properties) self.deserializer = glance.api.v2.images.RequestDeserializer(schema) def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'name': 'image-1', 'pants': 'on' }) output = self.deserializer.create(request) expected = { 'image': {'name': 'image-1'}, 'extra_properties': {'pants': 'on'}, 'tags': [], } self.assertEqual(expected, output) def test_create_bad_data(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'name': 'image-1', 'pants': 'borked' }) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/pants', 'value': 'off'}] request.body = jsonutils.dump_as_bytes(doc) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 10, 'op': 'add', 'path': ['pants'], 'value': 'off'}, ]} self.assertEqual(expected, output) def test_update_bad_data(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/pants', 'value': 'cutoffs'}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) class TestImagesDeserializerWithAdditionalProperties(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializerWithAdditionalProperties, self).setUp() self.deserializer = glance.api.v2.images.RequestDeserializer() def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'foo': 'bar'}) output = self.deserializer.create(request) expected = {'image': {}, 'extra_properties': {'foo': 'bar'}, 'tags': []} self.assertEqual(expected, output) def test_create_with_numeric_property(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'abc': 123}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update_with_numeric_property(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': 123}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_create_with_list_property(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'foo': ['bar']}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def 
test_update_with_list_property(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': ['bar', 'baz']}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] request.body = jsonutils.dump_as_bytes(doc) output = self.deserializer.update(request) change = { 'json_schema_version': 10, 'op': 'add', 'path': ['foo'], 'value': 'bar' } self.assertEqual({'changes': [change]}, output) class TestImagesSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializer, self).setUp() self.serializer = glance.api.v2.images.ResponseSerializer() self.fixtures = [ # NOTE(bcwaldon): This first fixture has every property defined _domain_fixture(UUID1, name='image-1', size=1024, virtual_size=3072, created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, visibility='public', container_format='ami', tags=['one', 'two'], disk_format='ami', min_ram=128, min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1), # NOTE(bcwaldon): This second fixture depends on default behavior # and sets most values to None _domain_fixture(UUID2, created_at=DATETIME, updated_at=DATETIME), ] def test_index(self): expected = { 'images': [ { 'id': UUID1, 'name': 'image-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'os_hidden': False, 'tags': set(['one', 'two']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': MULTIHASH1, 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', }, { 'id': UUID2, 'status': 'queued', 'visibility': 'private', 'protected': False, 'os_hidden': False, 'tags': set([]), 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'size': None, 'name': None, 'owner': None, 'min_ram': None, 'min_disk': None, 'checksum': None, 'os_hash_algo': None, 'os_hash_value': None, 'disk_format': None, 'virtual_size': None, 'container_format': None, }, ], 'first': '/v2/images', 'schema': '/v2/schemas/images', } request = webob.Request.blank('/v2/images') response = webob.Response(request=request) result = {'images': self.fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) for image in actual['images']: image['tags'] = set(image['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_index_next_marker(self): request = webob.Request.blank('/v2/images') response = webob.Response(request=request) result = {'images': self.fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) self.assertEqual('/v2/images?marker=%s' % UUID2, output['next']) def test_index_carries_query_parameters(self): url = '/v2/images?limit=10&sort_key=id&sort_dir=asc' request = webob.Request.blank(url) response = webob.Response(request=request) result = {'images': 
self.fixtures, 'next_marker': UUID2}
        self.serializer.index(response, result)
        output = jsonutils.loads(response.body)
        expected_url = '/v2/images?limit=10&sort_dir=asc&sort_key=id'
        self.assertEqual(unit_test_utils.sort_url_by_qs_keys(expected_url),
                         unit_test_utils.sort_url_by_qs_keys(output['first']))
        expect_next = '/v2/images?limit=10&marker=%s&sort_dir=asc&sort_key=id'
        self.assertEqual(unit_test_utils.sort_url_by_qs_keys(
                         expect_next % UUID2),
                         unit_test_utils.sort_url_by_qs_keys(output['next']))

    def test_index_forbidden_get_image_location(self):
        """Make sure the serializer still works for a forbidden location.

        The index must serialize cleanly even when the current user is
        not authorized to get the image location and
        show_multiple_locations is False.
        """
        class ImageLocations(object):
            def __len__(self):
                raise exception.Forbidden()

        self.config(show_multiple_locations=False)
        self.config(show_image_direct_url=False)
        url = '/v2/images?limit=10&sort_key=id&sort_dir=asc'
        request = webob.Request.blank(url)
        response = webob.Response(request=request)
        result = {'images': self.fixtures}
        self.assertEqual(http.OK, response.status_int)

        # The image index should work even though the user is forbidden
        result['images'][0].locations = ImageLocations()
        self.serializer.index(response, result)
        self.assertEqual(http.OK, response.status_int)

    def test_show_full_fixture(self):
        expected = {
            'id': UUID1,
            'name': 'image-1',
            'status': 'queued',
            'visibility': 'public',
            'protected': False,
            'os_hidden': False,
            'tags': set(['one', 'two']),
            'size': 1024,
            'virtual_size': 3072,
            'checksum': 'ca425b88f047ce8ec45ee90e813ada91',
            'os_hash_algo': FAKEHASHALGO,
            'os_hash_value': MULTIHASH1,
            'container_format': 'ami',
            'disk_format': 'ami',
            'min_ram': 128,
            'min_disk': 10,
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'self': '/v2/images/%s' % UUID1,
            'file': '/v2/images/%s/file' % UUID1,
            'schema': '/v2/schemas/image',
            'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df',
        }
        response = webob.Response()
        self.serializer.show(response, self.fixtures[0])
        actual = jsonutils.loads(response.body)
        actual['tags'] = set(actual['tags'])
        self.assertEqual(expected, actual)
        self.assertEqual('application/json', response.content_type)

    def test_show_minimal_fixture(self):
        expected = {
            'id': UUID2,
            'status': 'queued',
            'visibility': 'private',
            'protected': False,
            'os_hidden': False,
            'tags': [],
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'self': '/v2/images/%s' % UUID2,
            'file': '/v2/images/%s/file' % UUID2,
            'schema': '/v2/schemas/image',
            'size': None,
            'name': None,
            'owner': None,
            'min_ram': None,
            'min_disk': None,
            'checksum': None,
            'os_hash_algo': None,
            'os_hash_value': None,
            'disk_format': None,
            'virtual_size': None,
            'container_format': None,
        }
        response = webob.Response()
        self.serializer.show(response, self.fixtures[1])
        self.assertEqual(expected, jsonutils.loads(response.body))

    def test_create(self):
        expected = {
            'id': UUID1,
            'name': 'image-1',
            'status': 'queued',
            'visibility': 'public',
            'protected': False,
            'os_hidden': False,
            'tags': ['one', 'two'],
            'size': 1024,
            'virtual_size': 3072,
            'checksum': 'ca425b88f047ce8ec45ee90e813ada91',
            'os_hash_algo': FAKEHASHALGO,
            'os_hash_value': MULTIHASH1,
            'container_format': 'ami',
            'disk_format': 'ami',
            'min_ram': 128,
            'min_disk': 10,
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'self': '/v2/images/%s' % UUID1,
            'file': '/v2/images/%s/file' % UUID1,
            'schema': '/v2/schemas/image',
            'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df',
        }
        response = webob.Response()
        self.serializer.create(response, self.fixtures[0])
        self.assertEqual(http.CREATED, response.status_int)
        actual = 
jsonutils.loads(response.body) actual['tags'] = sorted(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) self.assertEqual('/v2/images/%s' % UUID1, response.location) def test_create_has_import_methods_header(self): # NOTE(rosmaita): enabled_import_methods is defined as type # oslo.config.cfg.ListOpt, so it is stored internally as a list # but is converted to a string for output in the HTTP header header_name = 'OpenStack-image-import-methods' # check multiple methods enabled_methods = ['one', 'two', 'three'] self.config(enabled_import_methods=enabled_methods) response = webob.Response() self.serializer.create(response, self.fixtures[0]) self.assertEqual(http.CREATED, response.status_int) header_value = response.headers.get(header_name) self.assertIsNotNone(header_value) self.assertCountEqual(enabled_methods, header_value.split(',')) # check single method self.config(enabled_import_methods=['swift-party-time']) response = webob.Response() self.serializer.create(response, self.fixtures[0]) self.assertEqual(http.CREATED, response.status_int) header_value = response.headers.get(header_name) self.assertIsNotNone(header_value) self.assertEqual('swift-party-time', header_value) # no header for empty config value self.config(enabled_import_methods=[]) response = webob.Response() self.serializer.create(response, self.fixtures[0]) self.assertEqual(http.CREATED, response.status_int) headers = response.headers.keys() self.assertNotIn(header_name, headers) def test_update(self): expected = { 'id': UUID1, 'name': 'image-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'os_hidden': False, 'tags': set(['one', 'two']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': MULTIHASH1, 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.update(response, self.fixtures[0]) actual = jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_import_image(self): response = webob.Response() self.serializer.import_image(response, {}) self.assertEqual(http.ACCEPTED, response.status_int) self.assertEqual('0', response.headers['Content-Length']) def test_image_stage_host_hidden(self): # Make sure that os_glance_stage_host is not exposed to clients response = webob.Response() self.serializer.show(response, mock.MagicMock(extra_properties={ 'foo': 'bar', 'os_glance_stage_host': 'http://foo'})) actual = jsonutils.loads(response.body) self.assertIn('foo', actual) self.assertNotIn('os_glance_stage_host', actual) def test_add_location(self): response = webob.Response() self.serializer.add_location(response, {}) self.assertEqual(http.ACCEPTED, response.status_int) self.assertEqual('0', response.headers['Content-Length']) class TestImagesSerializerWithUnicode(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerWithUnicode, self).setUp() self.serializer = glance.api.v2.images.ResponseSerializer() self.fixtures = [ # NOTE(bcwaldon): This first fixture has every property defined _domain_fixture(UUID1, **{ 'name': 'OpenStack\u2122-1', 'size': 1024, 'virtual_size': 3072, 'tags': 
['\u2160', '\u2161'], 'created_at': DATETIME, 'updated_at': DATETIME, 'owner': TENANT1, 'visibility': 'public', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': MULTIHASH1, 'extra_properties': {'lang': 'Fran\u00E7ais', 'dispos\u00E9': 'f\u00E2ch\u00E9'}, }), ] def test_index(self): expected = { 'images': [ { 'id': UUID1, 'name': 'OpenStack\u2122-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'os_hidden': False, 'tags': ['\u2160', '\u2161'], 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': str(FAKEHASHALGO), 'os_hash_value': str(MULTIHASH1), 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': str(ISOTIME), 'updated_at': str(ISOTIME), 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'lang': 'Fran\u00E7ais', 'dispos\u00E9': 'f\u00E2ch\u00E9', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', }, ], 'first': '/v2/images', 'schema': '/v2/schemas/images', } request = webob.Request.blank('/v2/images') response = webob.Response(request=request) result = {'images': self.fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) actual['images'][0]['tags'] = sorted(actual['images'][0]['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_show_full_fixture(self): expected = { 'id': UUID1, 'name': 'OpenStack\u2122-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'os_hidden': False, 'tags': set(['\u2160', '\u2161']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': str(FAKEHASHALGO), 'os_hash_value': str(MULTIHASH1), 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': str(ISOTIME), 'updated_at': str(ISOTIME), 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'lang': 'Fran\u00E7ais', 'dispos\u00E9': 'f\u00E2ch\u00E9', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.show(response, self.fixtures[0]) actual = jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_create(self): expected = { 'id': UUID1, 'name': 'OpenStack\u2122-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'os_hidden': False, 'tags': ['\u2160', '\u2161'], 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': str(FAKEHASHALGO), 'os_hash_value': str(MULTIHASH1), 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': str(ISOTIME), 'updated_at': str(ISOTIME), 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'lang': 'Fran\u00E7ais', 'dispos\u00E9': 'f\u00E2ch\u00E9', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.create(response, self.fixtures[0]) self.assertEqual(http.CREATED, response.status_int) actual = jsonutils.loads(response.body) actual['tags'] = sorted(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) self.assertEqual('/v2/images/%s' % UUID1, response.location) def 
test_update(self): expected = { 'id': UUID1, 'name': 'OpenStack\u2122-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'os_hidden': False, 'tags': set(['\u2160', '\u2161']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': str(FAKEHASHALGO), 'os_hash_value': str(MULTIHASH1), 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': str(ISOTIME), 'updated_at': str(ISOTIME), 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'lang': 'Fran\u00E7ais', 'dispos\u00E9': 'f\u00E2ch\u00E9', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.update(response, self.fixtures[0]) actual = jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) class TestImagesSerializerWithExtendedSchema(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerWithExtendedSchema, self).setUp() custom_image_properties = { 'color': { 'type': 'string', 'enum': ['red', 'green'], }, } schema = glance.api.v2.images.get_schema(custom_image_properties) self.serializer = glance.api.v2.images.ResponseSerializer(schema) props = dict(color='green', mood='grouchy') self.fixture = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) def test_show(self): expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'os_hidden': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': MULTIHASH1, 'tags': [], 'size': 1024, 'virtual_size': 3072, 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'color': 'green', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'min_ram': None, 'min_disk': None, 'disk_format': None, 'container_format': None, 'mood': 'grouchy', } response = webob.Response() self.serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) def test_show_reports_invalid_data(self): self.fixture.extra_properties['color'] = 'invalid' expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'os_hidden': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'os_hash_algo': FAKEHASHALGO, 'os_hash_value': MULTIHASH1, 'tags': [], 'size': 1024, 'virtual_size': 3072, 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'color': 'invalid', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'min_ram': None, 'min_disk': None, 'disk_format': None, 'container_format': None, 'mood': 'grouchy', } response = webob.Response() self.serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) class TestImagesSerializerWithAdditionalProperties(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerWithAdditionalProperties, self).setUp() self.fixture = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', os_hash_algo=FAKEHASHALGO, os_hash_value=MULTIHASH1, 
created_at=DATETIME, updated_at=DATETIME, size=1024,
            virtual_size=3072, extra_properties={'marx': 'groucho'})

    def test_show(self):
        serializer = glance.api.v2.images.ResponseSerializer()
        expected = {
            'id': UUID2,
            'name': 'image-2',
            'status': 'queued',
            'visibility': 'private',
            'protected': False,
            'os_hidden': False,
            'checksum': 'ca425b88f047ce8ec45ee90e813ada91',
            'os_hash_algo': FAKEHASHALGO,
            'os_hash_value': MULTIHASH1,
            'marx': 'groucho',
            'tags': [],
            'size': 1024,
            'virtual_size': 3072,
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'self': '/v2/images/%s' % UUID2,
            'file': '/v2/images/%s/file' % UUID2,
            'schema': '/v2/schemas/image',
            'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81',
            'min_ram': None,
            'min_disk': None,
            'disk_format': None,
            'container_format': None,
        }
        response = webob.Response()
        serializer.show(response, self.fixture)
        self.assertEqual(expected, jsonutils.loads(response.body))

    def test_show_invalid_additional_property(self):
        """Ensure the serializer passes through invalid additional properties.

        It must not complain about, e.g., non-string values.
        """
        serializer = glance.api.v2.images.ResponseSerializer()
        self.fixture.extra_properties['marx'] = 123
        expected = {
            'id': UUID2,
            'name': 'image-2',
            'status': 'queued',
            'visibility': 'private',
            'protected': False,
            'os_hidden': False,
            'checksum': 'ca425b88f047ce8ec45ee90e813ada91',
            'os_hash_algo': FAKEHASHALGO,
            'os_hash_value': MULTIHASH1,
            'marx': 123,
            'tags': [],
            'size': 1024,
            'virtual_size': 3072,
            'created_at': ISOTIME,
            'updated_at': ISOTIME,
            'self': '/v2/images/%s' % UUID2,
            'file': '/v2/images/%s/file' % UUID2,
            'schema': '/v2/schemas/image',
            'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81',
            'min_ram': None,
            'min_disk': None,
            'disk_format': None,
            'container_format': None,
        }
        response = webob.Response()
        serializer.show(response, self.fixture)
        self.assertEqual(expected, jsonutils.loads(response.body))


class TestImagesSerializerDirectUrl(test_utils.BaseTestCase):
    def setUp(self):
        super(TestImagesSerializerDirectUrl, self).setUp()
        self.serializer = glance.api.v2.images.ResponseSerializer()
        self.active_image = _domain_fixture(
            UUID1, name='image-1', visibility='public', status='active',
            size=1024, virtual_size=3072, created_at=DATETIME,
            updated_at=DATETIME,
            locations=[{'id': '1', 'url': 'http://some/fake/location',
                        'metadata': {}, 'status': 'active'}])
        self.queued_image = _domain_fixture(
            UUID2, name='image-2', status='active', created_at=DATETIME,
            updated_at=DATETIME,
            checksum='ca425b88f047ce8ec45ee90e813ada91')
        self.location_data_image_url = 'http://abc.com/somewhere'
        self.location_data_image_meta = {'key': 98231}
        self.location_data_image = _domain_fixture(
            UUID2, name='image-2', status='active', created_at=DATETIME,
            updated_at=DATETIME,
            locations=[{'id': '2', 'url': self.location_data_image_url,
                        'metadata': self.location_data_image_meta,
                        'status': 'active'}])

    def _do_index(self):
        request = webob.Request.blank('/v2/images')
        response = webob.Response(request=request)
        self.serializer.index(response, {'images': [self.active_image,
                                                    self.queued_image]})
        return jsonutils.loads(response.body)['images']

    def _do_show(self, image):
        request = webob.Request.blank('/v2/images')
        response = webob.Response(request=request)
        self.serializer.show(response, image)
        return jsonutils.loads(response.body)

    def test_index_store_location_enabled(self):
        self.config(show_image_direct_url=True)
        images = self._do_index()

        # NOTE(markwash): ordering sanity check
        self.assertEqual(UUID1, images[0]['id'])
        self.assertEqual(UUID2, images[1]['id'])
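# NOTE: The direct_url assertions here and in the following tests
# reduce to one rule: when show_image_direct_url is enabled and the
# image has at least one location, the serializer exposes the first
# location's URL as 'direct_url'; otherwise the key is omitted
# entirely rather than set to None. A standalone sketch of that rule
# (a hypothetical helper, not the actual ResponseSerializer code):

def _apply_direct_url(view, locations, show_direct_url):
    """Copy the first location URL into the view when configured."""
    if show_direct_url and locations:
        view['direct_url'] = locations[0]['url']
    return view


assert 'direct_url' not in _apply_direct_url({}, [], True)
assert 'direct_url' not in _apply_direct_url({}, [{'url': 'u'}], False)
assert _apply_direct_url(
    {}, [{'url': 'http://some/fake/location'}], True
)['direct_url'] == 'http://some/fake/location'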
self.assertEqual('http://some/fake/location', images[0]['direct_url']) self.assertNotIn('direct_url', images[1]) def test_index_store_multiple_location_enabled(self): self.config(show_multiple_locations=True) request = webob.Request.blank('/v2/images') response = webob.Response(request=request) self.serializer.index(response, {'images': [self.location_data_image]}), images = jsonutils.loads(response.body)['images'] location = images[0]['locations'][0] self.assertEqual(location['url'], self.location_data_image_url) self.assertEqual(location['metadata'], self.location_data_image_meta) def test_index_store_location_explicitly_disabled(self): self.config(show_image_direct_url=False) images = self._do_index() self.assertNotIn('direct_url', images[0]) self.assertNotIn('direct_url', images[1]) def test_show_location_enabled(self): self.config(show_image_direct_url=True) image = self._do_show(self.active_image) self.assertEqual('http://some/fake/location', image['direct_url']) def test_show_location_enabled_but_not_set(self): self.config(show_image_direct_url=True) image = self._do_show(self.queued_image) self.assertNotIn('direct_url', image) def test_show_location_explicitly_disabled(self): self.config(show_image_direct_url=False) image = self._do_show(self.active_image) self.assertNotIn('direct_url', image) class TestImageSchemaFormatConfiguration(test_utils.BaseTestCase): def test_default_disk_formats(self): schema = glance.api.v2.images.get_schema() expected = [None, 'ami', 'ari', 'aki', 'vhd', 'vhdx', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso', 'ploop'] actual = schema.properties['disk_format']['enum'] self.assertEqual(expected, actual) def test_custom_disk_formats(self): self.config(disk_formats=['gabe'], group="image_format") schema = glance.api.v2.images.get_schema() expected = [None, 'gabe'] actual = schema.properties['disk_format']['enum'] self.assertEqual(expected, actual) def test_default_container_formats(self): schema = glance.api.v2.images.get_schema() expected = [None, 'ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker', 'compressed'] actual = schema.properties['container_format']['enum'] self.assertEqual(expected, actual) def test_custom_container_formats(self): self.config(container_formats=['mark'], group="image_format") schema = glance.api.v2.images.get_schema() expected = [None, 'mark'] actual = schema.properties['container_format']['enum'] self.assertEqual(expected, actual) class TestImageSchemaDeterminePropertyBasis(test_utils.BaseTestCase): def test_custom_property_marked_as_non_base(self): custom_image_properties = { 'pants': { 'type': 'string', }, } schema = glance.api.v2.images.get_schema(custom_image_properties) self.assertFalse(schema.properties['pants'].get('is_base', True)) def test_base_property_marked_as_base(self): schema = glance.api.v2.images.get_schema() self.assertTrue(schema.properties['disk_format'].get('is_base', True)) class TestMultiImagesController(base.MultiIsolatedUnitTest): def setUp(self): super(TestMultiImagesController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = store self._create_images() self._create_image_members() stores = {'cheap': 'file', 'fast': 'file', 'empty': 'file'} self.config(enabled_backends=stores) self.store.register_store_opts(CONF) self.controller = glance.api.v2.images.ImagesController(self.db, self.policy, self.notifier, self.store) def _create_images(self): self.images = [ _db_fixture(UUID1, 
owner=TENANT1, checksum=CHKSUM, name='1', size=256, virtual_size=1024, visibility='public', locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}], disk_format='raw', container_format='bare', status='active', created_at=DATETIME), _db_fixture(UUID2, owner=TENANT1, checksum=CHKSUM1, name='2', size=512, virtual_size=2048, visibility='public', disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}, locations=[{'url': 'file://%s/%s' % (self.test_dir, UUID2), 'metadata': {}, 'status': 'active'}], created_at=DATETIME + datetime.timedelta(seconds=1)), _db_fixture(UUID5, owner=TENANT3, checksum=CHKSUM1, name='2', size=512, virtual_size=2048, visibility='public', disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}, locations=[{'url': 'file://%s/%s' % (self.test_dir, UUID2), 'metadata': {}, 'status': 'active'}], created_at=DATETIME + datetime.timedelta(seconds=1)), _db_fixture(UUID3, owner=TENANT3, checksum=CHKSUM1, name='3', size=512, virtual_size=2048, visibility='public', tags=['windows', '64bit', 'x86'], created_at=DATETIME + datetime.timedelta(seconds=2)), _db_fixture(UUID4, owner=TENANT4, name='4', size=1024, virtual_size=3072, created_at=DATETIME + datetime.timedelta(seconds=3)), _db_fixture(UUID6, owner=TENANT3, checksum=CHKSUM1, name='3', size=512, virtual_size=2048, visibility='public', disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}, locations=[{'url': 'file://%s/%s' % (self.test_dir, UUID6), 'metadata': {'store': 'fast'}, 'status': 'active'}, {'url': 'file://%s/%s' % (self.test_dir2, UUID6), 'metadata': {'store': 'cheap'}, 'status': 'active'}], created_at=DATETIME + datetime.timedelta(seconds=1)), _db_fixture(UUID7, owner=TENANT3, checksum=CHKSUM1, name='3', size=512, virtual_size=2048, visibility='public', disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}, locations=[{'url': 'file://%s/%s' % (self.test_dir, UUID7), 'metadata': {'store': 'fast'}, 'status': 'active'}], created_at=DATETIME + datetime.timedelta(seconds=1)), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID4, TENANT2), _db_image_member_fixture(UUID4, TENANT3, status='accepted'), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_image_import_image_not_exist(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.import_image, request, 'invalid_image', {'method': {'name': 'glance-direct'}}) def test_image_import_with_active_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID2, {'method': {'name': 'glance-direct'}}) def test_delete_from_store_as_non_owner(self): enforcer = unit_test_utils.enforcer_from_rules({ "get_image": "", "delete_image_location": "'TENANT4':%(owner)s", "get_image_location": "" }) request = unit_test_utils.get_fake_request() self.controller.policy = enforcer 
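# NOTE: The delete_from_store cases below pin down an error matrix:
# missing image -> 404, image not active -> 409, unknown store -> 409,
# store holding no copy of the image -> 404, and removing the only
# remaining copy -> 403 (an image must keep at least one location).
# A self-contained sketch of that decision logic (a hypothetical
# helper, not the controller implementation; the policy-based 403 in
# the non-owner case above is handled separately by the enforcer):

import webob.exc


def _check_delete_from_store(image, store_id, known_stores):
    """Raise the HTTP error the cases below expect, if any."""
    if image is None:
        raise webob.exc.HTTPNotFound()
    if image['status'] != 'active':
        raise webob.exc.HTTPConflict()
    if store_id not in known_stores:
        raise webob.exc.HTTPConflict()
    in_store = [loc for loc in image['locations']
                if loc['metadata'].get('store') == store_id]
    if not in_store:
        raise webob.exc.HTTPNotFound()
    if len(image['locations']) == 1:
        raise webob.exc.HTTPForbidden()
    return in_store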
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_from_store, request, "fast", UUID6) def test_delete_from_store_non_active(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPConflict, self.controller.delete_from_store, request, "fast", UUID3) def test_delete_from_store_no_image(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_from_store, request, "fast", "nonexisting") def test_delete_from_store_invalid_store(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPConflict, self.controller.delete_from_store, request, "burn", UUID6) def test_delete_from_store_not_in_store(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete_from_store, request, "empty", UUID6) def test_delete_from_store_one_location(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete_from_store, request, "fast", UUID7) def test_delete_from_store_as_non_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.controller.delete_from_store(request, "fast", UUID6) image = self.controller.show(request, UUID6) self.assertEqual(1, len(image.locations)) self.assertEqual("cheap", image.locations[0]['metadata']['store']) def test_delete_from_store_as_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete_from_store(request, "fast", UUID6) image = self.controller.show(request, UUID6) self.assertEqual(1, len(image.locations)) self.assertEqual("cheap", image.locations[0]['metadata']['store']) def test_image_lazy_loading_store(self): # assert existing image does not have store in metadata existing_image = self.images[1] self.assertNotIn('store', existing_image['locations'][0]['metadata']) # assert: store information will be added by lazy loading request = unit_test_utils.get_fake_request() with mock.patch.object(store_utils, "_get_store_id_from_uri") as mock_uri: mock_uri.return_value = "fast" image = self.controller.show(request, UUID2) for loc in image.locations: self.assertIn('store', loc['metadata']) def test_image_lazy_loading_store_different_owner(self): # assert existing image does not have store in metadata existing_image = self.images[2] self.assertNotIn('store', existing_image['locations'][0]['metadata']) # assert: store information will be added by lazy loading even if owner # is different request = unit_test_utils.get_fake_request() request.headers.update({'X-Tenant_id': TENANT1}) with mock.patch.object(store_utils, "_get_store_id_from_uri") as mock_uri: mock_uri.return_value = "fast" image = self.controller.show(request, UUID5) for loc in image.locations: self.assertIn('store', loc['metadata']) def test_image_import_invalid_backend_in_request_header(self): request = unit_test_utils.get_fake_request() request.headers['x-image-meta-store'] = 'dummy' with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) def test_image_import_raises_conflict_if_disk_format_is_none(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = 
FakeImage(disk_format=None) self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) def test_image_import_raises_conflict(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='queued') self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'glance-direct'}}) def test_image_import_raises_conflict_for_web_download(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID4, {'method': {'name': 'web-download'}}) def test_copy_image_stores_specified_in_header_and_body(self): request = unit_test_utils.get_fake_request() request.headers['x-image-meta-store'] = 'fast' with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID7, {'method': {'name': 'copy-image'}, 'stores': ["fast"]}) def test_copy_image_non_existing_image(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.side_effect = exception.NotFound self.assertRaises(webob.exc.HTTPNotFound, self.controller.import_image, request, UUID1, {'method': {'name': 'copy-image'}, 'stores': ["fast"]}) def test_copy_image_with_all_stores(self): request = unit_test_utils.get_fake_request() locations = {'url': 'file://%s/%s' % (self.test_dir, UUID7), 'metadata': {'store': 'fast'}, 'status': 'active'}, with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: with mock.patch.object(self.store, 'get_store_from_store_identifier'): mock_get.return_value = FakeImage(id=UUID7, status='active', locations=locations) self.assertIsNotNone(self.controller.import_image( request, UUID7, {'method': {'name': 'copy-image'}, 'all_stores': True})) def test_copy_non_active_image(self): request = unit_test_utils.get_fake_request() with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(status='uploading') self.assertRaises(webob.exc.HTTPConflict, self.controller.import_image, request, UUID1, {'method': {'name': 'copy-image'}, 'stores': ["fast"]}) def test_copy_image_in_existing_store(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.import_image, request, UUID6, {'method': {'name': 'copy-image'}, 'stores': ["fast"]}) def test_copy_image_to_other_stores(self): request = unit_test_utils.get_fake_request() locations = {'url': 'file://%s/%s' % (self.test_dir, UUID7), 'metadata': {'store': 'fast'}, 'status': 'active'}, with mock.patch.object( glance.notifier.ImageRepoProxy, 'get') as mock_get: mock_get.return_value = FakeImage(id=UUID7, status='active', locations=locations) output = self.controller.import_image( request, UUID7, {'method': {'name': 'copy-image'}, 'stores': ["cheap"]}) self.assertEqual(UUID7, output) @mock.patch.object(glance.notifier.TaskFactoryProxy, 'new_task') @mock.patch.object(glance.quota, '_calc_required_size') @mock.patch.object(glance.location, '_check_image_location') @mock.patch.object(glance.location.ImageRepoProxy, '_set_acls') @mock.patch.object(store, 
                       'get_size_from_uri_and_backend')
    def test_add_location(self, mock_get_size_uri, mock_set_acls,
                          mock_check_loc, mock_calc, mock_task):
        self.config(do_secure_hash=True)
        mock_calc.return_value = 1
        mock_get_size_uri.return_value = 1
        image_id = str(uuid.uuid4())
        self.images = [
            _db_fixture(image_id, owner=TENANT1, name='1',
                        disk_format='raw', container_format='bare',
                        status='queued', checksum=None,
                        os_hash_algo=None, os_hash_value=None),
        ]
        self.db.image_create(None, self.images[0])
        url = 'file://%s/%s' % (self.test_dir, UUID7)
        task_input = {
            "image_id": image_id,
            "loc_url": url,
            "validation_data": {}
        }
        request = unit_test_utils.get_fake_request(roles=['service'])
        req_body = {'url': url}
        self.controller.add_location(request, image_id, req_body)
        mock_task.assert_called_with(
            task_type='location_import',
            owner=TENANT1,
            task_input=task_input,
            image_id=image_id,
            user_id=request.context.user_id,
            request_id=request.context.request_id)


class TestProxyHelpers(base.IsolatedUnitTest):
    def test_proxy_response_error(self):
        e = glance.api.v2.images.proxy_response_error(123, 'Foo')
        self.assertIsInstance(e, webob.exc.HTTPError)
        self.assertEqual(123, e.code)
        self.assertEqual('123 Foo', e.status)

    def test_is_proxyable(self):
        controller = glance.api.v2.images.ImagesController(None, None,
                                                           None, None)
        self.config(worker_self_reference_url='http://worker1')
        mock_image = mock.MagicMock(extra_properties={})
        self.assertFalse(controller.is_proxyable(mock_image))
        mock_image.extra_properties['os_glance_stage_host'] = 'http://worker1'
        self.assertFalse(controller.is_proxyable(mock_image))
        mock_image.extra_properties['os_glance_stage_host'] = 'http://worker2'
        self.assertTrue(controller.is_proxyable(mock_image))

    def test_self_url(self):
        controller = glance.api.v2.images.ImagesController(None, None,
                                                           None, None)
        self.assertIsNone(controller.self_url)
        self.config(public_endpoint='http://lb.example.com')
        self.assertEqual('http://lb.example.com', controller.self_url)
        self.config(worker_self_reference_url='http://worker1.example.com')
        self.assertEqual('http://worker1.example.com', controller.self_url)

glance-29.0.0/glance/tests/unit/v2/test_metadef_resources.py

# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
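# NOTE: Every _db_*_fixture helper below follows one pattern: a dict
# of schema defaults merged with caller overrides via kwargs. A
# generic, self-contained sketch of the pattern (illustrative names
# only, not part of this module's API):

def _make_fixture(defaults, **overrides):
    """Return a copy of the defaults with the overrides applied."""
    obj = dict(defaults)
    obj.update(overrides)
    return obj


_ns = _make_fixture({'visibility': 'public', 'protected': False},
                    owner='some-tenant', protected=True)
assert _ns == {'visibility': 'public', 'protected': True,
               'owner': 'some-tenant'}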
import datetime from unittest import mock from oslo_serialization import jsonutils import webob import wsme from glance.api import policy from glance.api.v2 import metadef_namespaces as namespaces from glance.api.v2 import metadef_objects as objects from glance.api.v2 import metadef_properties as properties from glance.api.v2 import metadef_resource_types as resource_types from glance.api.v2 import metadef_tags as tags import glance.gateway from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) ISOTIME = '2012-05-16T15:27:36Z' NAMESPACE1 = 'Namespace1' NAMESPACE2 = 'Namespace2' NAMESPACE3 = 'Namespace3' NAMESPACE4 = 'Namespace4' NAMESPACE5 = 'Namespace5' NAMESPACE6 = 'Namespace6' PROPERTY1 = 'Property1' PROPERTY2 = 'Property2' PROPERTY3 = 'Property3' PROPERTY4 = 'Property4' OBJECT1 = 'Object1' OBJECT2 = 'Object2' OBJECT3 = 'Object3' RESOURCE_TYPE1 = 'ResourceType1' RESOURCE_TYPE2 = 'ResourceType2' RESOURCE_TYPE3 = 'ResourceType3' RESOURCE_TYPE4 = 'ResourceType4' TAG1 = 'Tag1' TAG2 = 'Tag2' TAG3 = 'Tag3' TAG4 = 'Tag4' TAG5 = 'Tag5' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' PREFIX1 = 'pref' def _db_namespace_fixture(namespace, **kwargs): obj = { 'namespace': namespace, 'display_name': None, 'description': None, 'visibility': 'public', 'protected': False, 'owner': None, } obj.update(kwargs) return obj def _db_property_fixture(name, **kwargs): obj = { 'name': name, 'json_schema': {"type": "string", "title": "title"}, } obj.update(kwargs) return obj def _db_object_fixture(name, **kwargs): obj = { 'name': name, 'description': None, 'json_schema': {}, 'required': '[]', } obj.update(kwargs) return obj def _db_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'protected': False, } obj.update(kwargs) return obj def _db_tag_fixture(name, **kwargs): obj = { 'name': name } obj.update(kwargs) return obj def _db_tags_fixture(tag_names=None): tag_list = [] if not tag_names: tag_names = [TAG1, TAG2, TAG3] for tag_name in tag_names: tag = tags.MetadefTag() tag.name = tag_name tag_list.append(tag) return tag_list def _db_namespace_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'properties_target': None, 'prefix': None, } obj.update(kwargs) return obj class TestMetadefsControllers(base.IsolatedUnitTest): def setUp(self): super(TestMetadefsControllers, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = policy.Enforcer() self.notifier = unit_test_utils.FakeNotifier() self._create_namespaces() self._create_properties() self._create_objects() self._create_resource_types() self._create_namespaces_resource_types() self._create_tags() self.namespace_controller = namespaces.NamespaceController( self.db, self.policy, self.notifier) self.property_controller = properties.NamespacePropertiesController( self.db, self.policy, self.notifier) self.object_controller = objects.MetadefObjectsController( self.db, self.policy, self.notifier) self.rt_controller = resource_types.ResourceTypeController( self.db, self.policy, self.notifier) self.tag_controller = tags.TagsController( self.db, self.policy, self.notifier) self.deserializer = objects.RequestDeserializer() self.property_deserializer = properties.RequestDeserializer() def _create_namespaces(self): req = unit_test_utils.get_fake_request() self.namespaces = [ 
_db_namespace_fixture(NAMESPACE1, owner=TENANT1, visibility='private', protected=True), _db_namespace_fixture(NAMESPACE2, owner=TENANT2, visibility='private'), _db_namespace_fixture(NAMESPACE3, owner=TENANT3), _db_namespace_fixture(NAMESPACE5, owner=TENANT4), _db_namespace_fixture(NAMESPACE6, owner=TENANT4), ] [self.db.metadef_namespace_create(req.context, namespace) for namespace in self.namespaces] def _create_properties(self): req = unit_test_utils.get_fake_request() self.properties = [ (NAMESPACE3, _db_property_fixture(PROPERTY1)), (NAMESPACE3, _db_property_fixture(PROPERTY2)), (NAMESPACE1, _db_property_fixture(PROPERTY1)), (NAMESPACE6, _db_property_fixture(PROPERTY4)), ] [self.db.metadef_property_create(req.context, namespace, property) for namespace, property in self.properties] def _create_objects(self): req = unit_test_utils.get_fake_request() self.objects = [ (NAMESPACE3, _db_object_fixture(OBJECT1)), (NAMESPACE3, _db_object_fixture(OBJECT2)), (NAMESPACE1, _db_object_fixture(OBJECT1)), ] [self.db.metadef_object_create(req.context, namespace, object) for namespace, object in self.objects] def _create_resource_types(self): req = unit_test_utils.get_fake_request() self.resource_types = [ _db_resource_type_fixture(RESOURCE_TYPE1), _db_resource_type_fixture(RESOURCE_TYPE2), _db_resource_type_fixture(RESOURCE_TYPE4), ] [self.db.metadef_resource_type_create(req.context, resource_type) for resource_type in self.resource_types] def _create_tags(self): req = unit_test_utils.get_fake_request() self.tags = [ (NAMESPACE3, _db_tag_fixture(TAG1)), (NAMESPACE3, _db_tag_fixture(TAG2)), (NAMESPACE1, _db_tag_fixture(TAG1)), ] [self.db.metadef_tag_create(req.context, namespace, tag) for namespace, tag in self.tags] def _create_namespaces_resource_types(self): req = unit_test_utils.get_fake_request(roles=['admin']) self.ns_resource_types = [ (NAMESPACE1, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), (NAMESPACE3, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), (NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), (NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE2)), (NAMESPACE6, _db_namespace_resource_type_fixture(RESOURCE_TYPE4, prefix=PREFIX1)), ] [self.db.metadef_resource_type_association_create(req.context, namespace, ns_resource_type) for namespace, ns_resource_type in self.ns_resource_types] def assertNotificationLog(self, expected_event_type, expected_payloads): events = [{'type': expected_event_type, 'payload': payload} for payload in expected_payloads] self.assertNotificationsLog(events) def assertNotificationsLog(self, expected_events): output_logs = self.notifier.get_logs() expected_logs_count = len(expected_events) self.assertEqual(expected_logs_count, len(output_logs)) for output_log, event in zip(output_logs, expected_events): self.assertEqual('INFO', output_log['notification_type']) self.assertEqual(event['type'], output_log['event_type']) self.assertLessEqual(event['payload'].items(), output_log['payload'].items()) self.notifier.log = [] def test_namespace_index(self): request = unit_test_utils.get_fake_request() output = self.namespace_controller.index(request) output = output.to_dict() self.assertEqual(4, len(output['namespaces'])) actual = set([namespace.namespace for namespace in output['namespaces']]) expected = set([NAMESPACE1, NAMESPACE3, NAMESPACE5, NAMESPACE6]) self.assertEqual(expected, actual) def test_namespace_index_admin(self): request = unit_test_utils.get_fake_request(roles=['admin']) output = 

    def test_namespace_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.namespace_controller.index(request)
        output = output.to_dict()
        self.assertEqual(4, len(output['namespaces']))
        actual = set([namespace.namespace
                      for namespace in output['namespaces']])
        expected = set([NAMESPACE1, NAMESPACE3, NAMESPACE5, NAMESPACE6])
        self.assertEqual(expected, actual)

    def test_namespace_index_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        output = self.namespace_controller.index(request)
        output = output.to_dict()
        self.assertEqual(5, len(output['namespaces']))
        actual = set([namespace.namespace
                      for namespace in output['namespaces']])
        expected = set([NAMESPACE1, NAMESPACE2, NAMESPACE3, NAMESPACE5,
                        NAMESPACE6])
        self.assertEqual(expected, actual)

    def test_namespace_index_visibility_public(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3)
        filters = {'visibility': 'public'}
        output = self.namespace_controller.index(request, filters=filters)
        output = output.to_dict()
        self.assertEqual(3, len(output['namespaces']))
        actual = set([namespace.namespace
                      for namespace in output['namespaces']])
        expected = set([NAMESPACE3, NAMESPACE5, NAMESPACE6])
        self.assertEqual(expected, actual)

    def test_namespace_index_resource_type(self):
        request = unit_test_utils.get_fake_request()
        filters = {'resource_types': [RESOURCE_TYPE1]}
        output = self.namespace_controller.index(request, filters=filters)
        output = output.to_dict()
        self.assertEqual(2, len(output['namespaces']))
        actual = set([namespace.namespace
                      for namespace in output['namespaces']])
        expected = set([NAMESPACE1, NAMESPACE3])
        self.assertEqual(expected, actual)

    def test_namespace_index_resource_type_delete_race(self):
        request = unit_test_utils.get_fake_request()
        filters = {'resource_types': [RESOURCE_TYPE1]}

        real_gmrtr = self.namespace_controller.gateway.\
            get_metadef_resource_type_repo

        def race_delete(*a, **k):
            self.db.metadef_namespace_delete(request.context, NAMESPACE3)
            return real_gmrtr(*a, **k)

        with mock.patch.object(self.namespace_controller.gateway,
                               'get_metadef_resource_type_repo') as g:
            # NOTE(danms): We simulate a late delete of one of our
            # namespaces by hijacking the call to get the metadef RT
            # repo and doing a delete at that point, before we iterate
            # the list of namespaces we already pulled from the DB. If
            # the code in the index API method changes, this will need
            # to be updated.
            g.side_effect = race_delete
            output = self.namespace_controller.index(request,
                                                     filters=filters)

        output = output.to_dict()
        self.assertEqual(2, len(output['namespaces']))
        actual = set([namespace.namespace
                      for namespace in output['namespaces']])
        # We should still see both namespaces
        expected = set([NAMESPACE1, NAMESPACE3])
        self.assertEqual(expected, actual)
        # And the first (undeleted) one should have the expected
        # associations...
        self.assertEqual(
            1, len(output['namespaces'][0].resource_type_associations))
        # ...but the one we deleted should be empty
        self.assertEqual(
            wsme.types.Unset,
            output['namespaces'][1].resource_type_associations)

    def test_namespace_show(self):
        request = unit_test_utils.get_fake_request()
        output = self.namespace_controller.show(request, NAMESPACE1)
        output = output.to_dict()
        self.assertEqual(NAMESPACE1, output['namespace'])
        self.assertEqual(TENANT1, output['owner'])
        self.assertTrue(output['protected'])
        self.assertEqual('private', output['visibility'])

    def test_namespace_show_with_related_resources(self):
        request = unit_test_utils.get_fake_request()
        output = self.namespace_controller.show(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(NAMESPACE3, output['namespace'])
        self.assertEqual(TENANT3, output['owner'])
        self.assertFalse(output['protected'])
        self.assertEqual('public', output['visibility'])

        self.assertEqual(2, len(output['properties']))
        actual = set([property for property in output['properties']])
        expected = set([PROPERTY1, PROPERTY2])
        self.assertEqual(expected, actual)

        self.assertEqual(2, len(output['objects']))
        actual = set([object.name for object in output['objects']])
        expected = set([OBJECT1, OBJECT2])
        self.assertEqual(expected, actual)

        self.assertEqual(1, len(output['resource_type_associations']))
        actual = set([rt.name for rt in output['resource_type_associations']])
        expected = set([RESOURCE_TYPE1])
        self.assertEqual(expected, actual)

    def test_namespace_show_with_property_prefix(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE2
        rt.prefix = 'pref'
        rt = self.rt_controller.create(request, rt, NAMESPACE3)

        object = objects.MetadefObject()
        object.name = OBJECT3
        object.required = []

        property = properties.PropertyType()
        property.name = PROPERTY2
        property.type = 'string'
        property.title = 'title'

        object.properties = {'prop1': property}

        object = self.object_controller.create(request, object, NAMESPACE3)
        self.assertNotificationsLog([
            {
                'type': 'metadef_resource_type.create',
                'payload': {
                    'namespace': NAMESPACE3,
                    'name': RESOURCE_TYPE2,
                    'prefix': 'pref',
                    'properties_target': None,
                }
            },
            {
                'type': 'metadef_object.create',
                'payload': {
                    'name': OBJECT3,
                    'namespace': NAMESPACE3,
                    'properties': [{
                        'name': 'prop1',
                        'additionalItems': None,
                        'confidential': None,
                        'title': 'title',
                        'default': None,
                        'pattern': None,
                        'enum': None,
                        'maximum': None,
                        'minItems': None,
                        'minimum': None,
                        'maxItems': None,
                        'minLength': None,
                        'uniqueItems': None,
                        'maxLength': None,
                        'items': None,
                        'type': 'string',
                        'description': None
                    }],
                    'required': [],
                    'description': None,
                }
            }
        ])

        filters = {'resource_type': RESOURCE_TYPE2}
        output = self.namespace_controller.show(request, NAMESPACE3, filters)
        output = output.to_dict()

        [self.assertTrue(property_name.startswith(rt.prefix))
            for property_name in output['properties'].keys()]

        for object in output['objects']:
            [self.assertTrue(property_name.startswith(rt.prefix))
                for property_name in object.properties.keys()]
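
    # When a namespace is shown with a resource_type filter whose
    # association carries a prefix, every property name in the response
    # (both namespace-level and inside objects) comes back with that
    # prefix prepended; that is what the list-comprehension asserts
    # above verify.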

    @mock.patch('glance.api.v2.metadef_namespaces.LOG')
    def test_cleanup_namespace_success(self, mock_log):
        fake_gateway = glance.gateway.Gateway(db_api=self.db,
                                              notifier=self.notifier,
                                              policy_enforcer=self.policy)
        req = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = namespaces.Namespace()
        namespace.namespace = 'FakeNamespace'
        namespace = self.namespace_controller.create(req, namespace)
        ns_repo = fake_gateway.get_metadef_namespace_repo(req.context)
        self.namespace_controller._cleanup_namespace(ns_repo, namespace, True)

        mock_log.debug.assert_called_with(
            "Cleaned up namespace %(namespace)s ",
            {'namespace': namespace.namespace})

    @mock.patch('glance.api.v2.metadef_namespaces.LOG')
    @mock.patch('glance.notifier.MetadefNamespaceRepoProxy.remove')
    def test_cleanup_namespace_exception(self, mock_remove, mock_log):
        mock_remove.side_effect = Exception('Mock remove was called')
        fake_gateway = glance.gateway.Gateway(db_api=self.db,
                                              notifier=self.notifier,
                                              policy_enforcer=self.policy)
        req = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = namespaces.Namespace()
        namespace.namespace = 'FakeNamespace'
        namespace = self.namespace_controller.create(req, namespace)
        ns_repo = fake_gateway.get_metadef_namespace_repo(req.context)
        self.namespace_controller._cleanup_namespace(ns_repo, namespace, True)

        called_msg = 'Failed to delete namespace %(namespace)s.' \
                     'Exception: %(exception)s'
        called_args = {'exception': 'Mock remove was called',
                       'namespace': 'FakeNamespace'}
        mock_log.error.assert_called_with((called_msg, called_args))
        mock_remove.assert_called_once_with(mock.ANY)

    def test_namespace_show_non_existing(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, 'FakeName')

    def test_namespace_show_non_visible(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE2)

    def test_namespace_delete(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])
        self.namespace_controller.delete(request, NAMESPACE2)
        self.assertNotificationLog("metadef_namespace.delete",
                                   [{'namespace': NAMESPACE2}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE2)

    def test_namespace_delete_notification_disabled(self):
        self.config(disabled_notifications=["metadef_namespace.delete"])
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])
        self.namespace_controller.delete(request, NAMESPACE2)
        self.assertNotificationsLog([])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE2)

    def test_namespace_delete_notification_group_disabled(self):
        self.config(disabled_notifications=["metadef_namespace"])
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])
        self.namespace_controller.delete(request, NAMESPACE2)
        self.assertNotificationsLog([])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE2)

    def test_namespace_delete_notification_create_disabled(self):
        self.config(disabled_notifications=["metadef_namespace.create"])
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])
        self.namespace_controller.delete(request, NAMESPACE2)
        self.assertNotificationLog("metadef_namespace.delete",
                                   [{'namespace': NAMESPACE2}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE2)

    def test_namespace_delete_non_existing(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.delete, request,
                          'FakeName')
        self.assertNotificationsLog([])

    def test_namespace_delete_non_visible(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.delete, request,
                          NAMESPACE2)
        self.assertNotificationsLog([])
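
    # The three *_notification_disabled tests above pin down how the
    # disabled_notifications option is matched: an exact event type
    # ("metadef_namespace.delete") or a whole group ("metadef_namespace")
    # suppresses the event, while disabling an unrelated event
    # ("metadef_namespace.create") leaves the delete notification intact.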

    def test_namespace_delete_non_visible_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.namespace_controller.delete(request, NAMESPACE2)
        self.assertNotificationLog("metadef_namespace.delete",
                                   [{'namespace': NAMESPACE2}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE2)

    def test_namespace_delete_protected(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.namespace_controller.delete, request,
                          NAMESPACE1)
        self.assertNotificationsLog([])

    def test_namespace_delete_protected_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.namespace_controller.delete, request,
                          NAMESPACE1)
        self.assertNotificationsLog([])

    def test_namespace_delete_with_contents(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.namespace_controller.delete(request, NAMESPACE3)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE3)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.show, request, NAMESPACE3,
                          OBJECT1)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE3,
                          OBJECT1)

    def test_namespace_delete_properties(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.namespace_controller.delete_properties(request, NAMESPACE3)

        output = self.property_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(0, len(output['properties']))
        self.assertNotificationLog("metadef_namespace.delete_properties",
                                   [{'namespace': NAMESPACE3}])

    def test_namespace_delete_properties_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.namespace_controller.delete_properties,
                          request, NAMESPACE3)
        self.assertNotificationsLog([])

    def test_namespace_delete_properties_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.namespace_controller.delete_properties(request, NAMESPACE3)

        output = self.property_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(0, len(output['properties']))
        self.assertNotificationLog("metadef_namespace.delete_properties",
                                   [{'namespace': NAMESPACE3}])

    def test_namespace_non_existing_delete_properties(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.delete_properties,
                          request, NAMESPACE4)
        self.assertNotificationsLog([])

    def test_namespace_delete_objects(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.namespace_controller.delete_objects(request, NAMESPACE3)

        output = self.object_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(0, len(output['objects']))
        self.assertNotificationLog("metadef_namespace.delete_objects",
                                   [{'namespace': NAMESPACE3}])

    def test_namespace_delete_objects_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.namespace_controller.delete_objects,
                          request, NAMESPACE3)
        self.assertNotificationsLog([])

    def test_namespace_delete_objects_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.namespace_controller.delete_objects(request, NAMESPACE3)

        output = self.object_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(0, len(output['objects']))
        self.assertNotificationLog("metadef_namespace.delete_objects",
                                   [{'namespace': NAMESPACE3}])
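
    # A pattern that recurs across these permission tests: a resource
    # the caller is not allowed to see at all yields 404 (its existence
    # is hidden), while a visible but protected or unowned resource
    # yields 403.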

    def test_namespace_non_existing_delete_objects(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.delete_objects,
                          request, NAMESPACE4)
        self.assertNotificationsLog([])

    def test_namespace_delete_tags(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.namespace_controller.delete_tags(request, NAMESPACE3)

        output = self.tag_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(0, len(output['tags']))
        self.assertNotificationLog("metadef_namespace.delete_tags",
                                   [{'namespace': NAMESPACE3}])

    def test_namespace_delete_tags_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.namespace_controller.delete_tags,
                          request, NAMESPACE3)
        self.assertNotificationsLog([])

    def test_namespace_delete_tags_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.namespace_controller.delete_tags(request, NAMESPACE3)

        output = self.tag_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(0, len(output['tags']))
        self.assertNotificationLog("metadef_namespace.delete_tags",
                                   [{'namespace': NAMESPACE3}])

    def test_namespace_non_existing_delete_tags(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.delete_tags,
                          request, NAMESPACE4)
        self.assertNotificationsLog([])

    def test_namespace_create(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE4
        namespace = self.namespace_controller.create(request, namespace)
        self.assertEqual(NAMESPACE4, namespace.namespace)
        self.assertNotificationLog("metadef_namespace.create",
                                   [{'namespace': NAMESPACE4}])

        namespace = self.namespace_controller.show(request, NAMESPACE4)
        self.assertEqual(NAMESPACE4, namespace.namespace)

    def test_namespace_create_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        namespace = namespaces.Namespace()
        namespace.namespace = '\U0001f693'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.namespace_controller.create, request,
                          namespace)

    def test_namespace_create_duplicate(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = namespaces.Namespace()
        namespace.namespace = 'new-namespace'
        new_ns = self.namespace_controller.create(request, namespace)
        self.assertEqual('new-namespace', new_ns.namespace)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.namespace_controller.create, request,
                          namespace)

    def test_namespace_create_different_owner(self):
        request = unit_test_utils.get_fake_request()

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE4
        namespace.owner = TENANT4
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.namespace_controller.create, request,
                          namespace)
        self.assertNotificationsLog([])

    def test_namespace_create_different_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE4
        namespace.owner = TENANT4
        namespace = self.namespace_controller.create(request, namespace)
        self.assertEqual(NAMESPACE4, namespace.namespace)
        self.assertNotificationLog("metadef_namespace.create",
                                   [{'namespace': NAMESPACE4}])

        namespace = self.namespace_controller.show(request, NAMESPACE4)
        self.assertEqual(NAMESPACE4, namespace.namespace)
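
    # '\U0001f693' in the *_4byte_character tests is a code point
    # outside the Basic Multilingual Plane; such names are rejected
    # with 400 -- historically, it seems, because MySQL's 3-byte utf8
    # charset cannot store 4-byte characters (an assumption about the
    # motivation; the tests themselves only assert the 400).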
    def test_namespace_create_with_related_resources(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE4

        prop1 = properties.PropertyType()
        prop1.type = 'string'
        prop1.title = 'title'

        prop2 = properties.PropertyType()
        prop2.type = 'string'
        prop2.title = 'title'

        namespace.properties = {PROPERTY1: prop1, PROPERTY2: prop2}

        object1 = objects.MetadefObject()
        object1.name = OBJECT1
        object1.required = []
        object1.properties = {}

        object2 = objects.MetadefObject()
        object2.name = OBJECT2
        object2.required = []
        object2.properties = {}

        namespace.objects = [object1, object2]

        output = self.namespace_controller.create(request, namespace)
        self.assertEqual(NAMESPACE4, namespace.namespace)
        output = output.to_dict()

        self.assertEqual(2, len(output['properties']))
        actual = set([property for property in output['properties']])
        expected = set([PROPERTY1, PROPERTY2])
        self.assertEqual(expected, actual)

        self.assertEqual(2, len(output['objects']))
        actual = set([object.name for object in output['objects']])
        expected = set([OBJECT1, OBJECT2])
        self.assertEqual(expected, actual)

        output = self.namespace_controller.show(request, NAMESPACE4)
        self.assertEqual(NAMESPACE4, namespace.namespace)
        output = output.to_dict()

        self.assertEqual(2, len(output['properties']))
        actual = set([property for property in output['properties']])
        expected = set([PROPERTY1, PROPERTY2])
        self.assertEqual(expected, actual)

        self.assertEqual(2, len(output['objects']))
        actual = set([object.name for object in output['objects']])
        expected = set([OBJECT1, OBJECT2])
        self.assertEqual(expected, actual)

        self.assertNotificationsLog([
            {
                'type': 'metadef_namespace.create',
                'payload': {
                    'namespace': NAMESPACE4,
                    'owner': TENANT1,
                }
            },
            {
                'type': 'metadef_object.create',
                'payload': {
                    'namespace': NAMESPACE4,
                    'name': OBJECT1,
                    'properties': [],
                }
            },
            {
                'type': 'metadef_object.create',
                'payload': {
                    'namespace': NAMESPACE4,
                    'name': OBJECT2,
                    'properties': [],
                }
            },
            {
                'type': 'metadef_property.create',
                'payload': {
                    'namespace': NAMESPACE4,
                    'type': 'string',
                    'title': 'title',
                }
            },
            {
                'type': 'metadef_property.create',
                'payload': {
                    'namespace': NAMESPACE4,
                    'type': 'string',
                    'title': 'title',
                }
            }
        ])

    def test_namespace_create_conflict(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE1

        self.assertRaises(webob.exc.HTTPConflict,
                          self.namespace_controller.create, request,
                          namespace)
        self.assertNotificationsLog([])

    def test_namespace_update(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = self.namespace_controller.show(request, NAMESPACE1)
        namespace.protected = False
        namespace = self.namespace_controller.update(request, namespace,
                                                     NAMESPACE1)
        self.assertFalse(namespace.protected)
        self.assertNotificationLog("metadef_namespace.update", [
            {'namespace': NAMESPACE1, 'protected': False}
        ])

        namespace = self.namespace_controller.show(request, NAMESPACE1)
        self.assertFalse(namespace.protected)

    def test_namespace_update_non_existing(self):
        request = unit_test_utils.get_fake_request()

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE4

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.update, request,
                          namespace, NAMESPACE4)
        self.assertNotificationsLog([])

    def test_namespace_update_non_visible(self):
        request = unit_test_utils.get_fake_request()

        namespace = namespaces.Namespace()
        namespace.namespace = NAMESPACE2

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.update, request,
                          namespace, NAMESPACE2)
        self.assertNotificationsLog([])

    def test_namespace_update_non_visible_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        namespace = self.namespace_controller.show(request, NAMESPACE2)
        namespace.protected = False
        namespace = self.namespace_controller.update(request, namespace,
                                                     NAMESPACE2)
        self.assertFalse(namespace.protected)
        self.assertNotificationLog("metadef_namespace.update", [
            {'namespace': NAMESPACE2, 'protected': False}
        ])

        namespace = self.namespace_controller.show(request, NAMESPACE2)
        self.assertFalse(namespace.protected)

    def test_namespace_update_name(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = self.namespace_controller.show(request, NAMESPACE1)
        namespace.namespace = NAMESPACE4
        namespace = self.namespace_controller.update(request, namespace,
                                                     NAMESPACE1)
        self.assertEqual(NAMESPACE4, namespace.namespace)
        self.assertNotificationLog("metadef_namespace.update", [
            {'namespace': NAMESPACE4, 'namespace_old': NAMESPACE1}
        ])

        namespace = self.namespace_controller.show(request, NAMESPACE4)
        self.assertEqual(NAMESPACE4, namespace.namespace)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.namespace_controller.show, request, NAMESPACE1)

    def test_namespace_update_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = self.namespace_controller.show(request, NAMESPACE1)
        namespace.namespace = '\U0001f693'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.namespace_controller.update, request,
                          namespace, NAMESPACE1)

    def test_namespace_update_name_conflict(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        namespace = self.namespace_controller.show(request, NAMESPACE1)
        namespace.namespace = NAMESPACE2
        self.assertRaises(webob.exc.HTTPConflict,
                          self.namespace_controller.update, request,
                          namespace, NAMESPACE1)
        self.assertNotificationsLog([])

    def test_property_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.property_controller.index(request, NAMESPACE3)
        self.assertEqual(2, len(output.properties))
        actual = set([property for property in output.properties])
        expected = set([PROPERTY1, PROPERTY2])
        self.assertEqual(expected, actual)

    def test_property_index_empty(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        output = self.property_controller.index(request, NAMESPACE2)
        self.assertEqual(0, len(output.properties))

    def test_property_index_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.index, request,
                          NAMESPACE4)

    def test_property_show(self):
        request = unit_test_utils.get_fake_request()
        output = self.property_controller.show(request, NAMESPACE3, PROPERTY1)
        self.assertEqual(PROPERTY1, output.name)

    def test_property_show_specific_resource_type(self):
        request = unit_test_utils.get_fake_request()
        output = self.property_controller.show(
            request, NAMESPACE6, ''.join([PREFIX1, PROPERTY4]),
            filters={'resource_type': RESOURCE_TYPE4})
        self.assertEqual(PROPERTY4, output.name)

    def test_property_show_prefix_mismatch(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE6,
                          PROPERTY4,
                          filters={'resource_type': RESOURCE_TYPE4})

    def test_property_show_non_existing_resource_type(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE2,
                          PROPERTY1,
                          filters={'resource_type': 'test'})
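
    # With a resource_type filter, a property must be addressed by its
    # prefixed name: NAMESPACE6's association with RESOURCE_TYPE4
    # carries PREFIX1, so show() resolves PREFIX1 + PROPERTY4 while the
    # bare PROPERTY4 name is a 404 (the prefix-mismatch case above).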

    def test_property_show_non_existing(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE2,
                          PROPERTY1)

    def test_property_show_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE1,
                          PROPERTY1)

    def test_property_show_non_visible_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        output = self.property_controller.show(request, NAMESPACE1, PROPERTY1)
        self.assertEqual(PROPERTY1, output.name)

    def test_property_delete(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.property_controller.delete(request, NAMESPACE3, PROPERTY1)
        self.assertNotificationLog("metadef_property.delete",
                                   [{'name': PROPERTY1,
                                     'namespace': NAMESPACE3}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE3,
                          PROPERTY1)

    def test_property_delete_disabled_notification(self):
        self.config(disabled_notifications=["metadef_property.delete"])
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.property_controller.delete(request, NAMESPACE3, PROPERTY1)
        self.assertNotificationsLog([])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE3,
                          PROPERTY1)

    def test_property_delete_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.property_controller.delete, request,
                          NAMESPACE3, PROPERTY1)
        self.assertNotificationsLog([])

    def test_property_delete_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.property_controller.delete(request, NAMESPACE3, PROPERTY1)
        self.assertNotificationLog("metadef_property.delete",
                                   [{'name': PROPERTY1,
                                     'namespace': NAMESPACE3}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.show, request, NAMESPACE3,
                          PROPERTY1)

    def test_property_delete_non_existing(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.delete, request,
                          NAMESPACE5, PROPERTY2)
        self.assertNotificationsLog([])

    def test_property_delete_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.delete, request,
                          NAMESPACE4, PROPERTY1)
        self.assertNotificationsLog([])

    def test_property_delete_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.delete, request,
                          NAMESPACE1, PROPERTY1)
        self.assertNotificationsLog([])

    def test_property_delete_admin_protected(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.property_controller.delete, request,
                          NAMESPACE1, PROPERTY1)
        self.assertNotificationsLog([])

    def test_property_create(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY2
        property.type = 'string'
        property.title = 'title'

        property = self.property_controller.create(request, NAMESPACE1,
                                                   property)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)
        self.assertNotificationLog("metadef_property.create",
                                   [{'name': PROPERTY2,
                                     'namespace': NAMESPACE1}])

        property = self.property_controller.show(request, NAMESPACE1,
                                                 PROPERTY2)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)
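
    # Name length is enforced by the metadef JSON schema at
    # deserialization time: 'a' * 81 in the overlimit tests below trips
    # the 80-character maxLength bound before the controller is ever
    # reached.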

    def test_property_create_overlimit_name(self):
        request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                                   'Namespace3/'
                                                   'properties',
                                                   roles=['admin'])
        request.body = jsonutils.dump_as_bytes({
            'name': 'a' * 81, 'type': 'string', 'title': 'fake'})

        exc = self.assertRaises(webob.exc.HTTPBadRequest,
                                self.property_deserializer.create, request)
        self.assertIn("Failed validating 'maxLength' in "
                      "schema['properties']['name']", exc.explanation)

    def test_property_create_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        property = properties.PropertyType()
        property.name = '\U0001f693'
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.property_controller.create, request,
                          NAMESPACE1, property)

    def test_property_create_with_operators(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY2
        property.type = 'string'
        property.title = 'title'
        property.operators = ['<or>']

        property = self.property_controller.create(request, NAMESPACE1,
                                                   property)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)
        self.assertEqual(['<or>'], property.operators)

        property = self.property_controller.show(request, NAMESPACE1,
                                                 PROPERTY2)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)
        self.assertEqual(['<or>'], property.operators)

    def test_property_create_conflict(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY1
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPConflict,
                          self.property_controller.create, request,
                          NAMESPACE1, property)
        self.assertNotificationsLog([])

    def test_property_create_non_visible_namespace(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)

        property = properties.PropertyType()
        property.name = PROPERTY1
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.create, request,
                          NAMESPACE1, property)
        self.assertNotificationsLog([])

    def test_property_create_non_visible_namespace_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY2
        property.type = 'string'
        property.title = 'title'

        property = self.property_controller.create(request, NAMESPACE1,
                                                   property)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)
        self.assertNotificationLog("metadef_property.create",
                                   [{'name': PROPERTY2,
                                     'namespace': NAMESPACE1}])

        property = self.property_controller.show(request, NAMESPACE1,
                                                 PROPERTY2)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)

    def test_property_create_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY1
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.create, request,
                          NAMESPACE4, property)
        self.assertNotificationsLog([])

    def test_property_create_duplicate(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        property = properties.PropertyType()
        property.name = 'new-property'
        property.type = 'string'
        property.title = 'title'

        new_property = self.property_controller.create(request, NAMESPACE1,
                                                       property)
        self.assertEqual('new-property', new_property.name)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.property_controller.create, request,
                          NAMESPACE1, property)

    def test_property_update(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        property = self.property_controller.show(request, NAMESPACE3,
                                                 PROPERTY1)
        property.name = PROPERTY1
        property.type = 'string123'
        property.title = 'title123'

        property = self.property_controller.update(request, NAMESPACE3,
                                                   PROPERTY1, property)
        self.assertEqual(PROPERTY1, property.name)
        self.assertEqual('string123', property.type)
        self.assertEqual('title123', property.title)
        self.assertNotificationLog("metadef_property.update", [
            {
                'name': PROPERTY1,
                'namespace': NAMESPACE3,
                'type': 'string123',
                'title': 'title123',
            }
        ])

        property = self.property_controller.show(request, NAMESPACE3,
                                                 PROPERTY1)
        self.assertEqual(PROPERTY1, property.name)
        self.assertEqual('string123', property.type)
        self.assertEqual('title123', property.title)

    def test_property_update_name(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        property = self.property_controller.show(request, NAMESPACE3,
                                                 PROPERTY1)
        property.name = PROPERTY3
        property.type = 'string'
        property.title = 'title'

        property = self.property_controller.update(request, NAMESPACE3,
                                                   PROPERTY1, property)
        self.assertEqual(PROPERTY3, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)
        self.assertNotificationLog("metadef_property.update", [
            {
                'name': PROPERTY3,
                'name_old': PROPERTY1,
                'namespace': NAMESPACE3,
                'type': 'string',
                'title': 'title',
            }
        ])

        property = self.property_controller.show(request, NAMESPACE3,
                                                 PROPERTY2)
        self.assertEqual(PROPERTY2, property.name)
        self.assertEqual('string', property.type)
        self.assertEqual('title', property.title)

    def test_property_update_conflict(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        property = self.property_controller.show(request, NAMESPACE3,
                                                 PROPERTY1)
        property.name = PROPERTY2
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPConflict,
                          self.property_controller.update, request,
                          NAMESPACE3, PROPERTY1, property)
        self.assertNotificationsLog([])

    def test_property_update_with_overlimit_name(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        request.body = jsonutils.dump_as_bytes({
            'name': 'a' * 81, 'type': 'string', 'title': 'fake'})

        exc = self.assertRaises(webob.exc.HTTPBadRequest,
                                self.property_deserializer.create, request)
        self.assertIn("Failed validating 'maxLength' in "
                      "schema['properties']['name']", exc.explanation)

    def test_property_update_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        property = self.property_controller.show(request, NAMESPACE3,
                                                 PROPERTY1)
        property.name = '\U0001f693'
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.property_controller.update, request,
                          NAMESPACE3, PROPERTY1, property)

    def test_property_update_non_existing(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY1
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.update, request,
                          NAMESPACE5, PROPERTY1, property)
        self.assertNotificationsLog([])

    def test_property_update_namespace_non_existing(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        property = properties.PropertyType()
        property.name = PROPERTY1
        property.type = 'string'
        property.title = 'title'

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.property_controller.update, request,
                          NAMESPACE4, PROPERTY1, property)
        self.assertNotificationsLog([])

    def test_object_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.object_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(2, len(output['objects']))
        actual = set([object.name for object in output['objects']])
        expected = set([OBJECT1, OBJECT2])
        self.assertEqual(expected, actual)

    def test_object_index_zero_limit(self):
        request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                                   'Namespace3/'
                                                   'objects?limit=0')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.index, request)

    def test_object_index_empty(self):
        request = unit_test_utils.get_fake_request()
        output = self.object_controller.index(request, NAMESPACE5)
        output = output.to_dict()
        self.assertEqual(0, len(output['objects']))

    def test_object_index_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.index, request,
                          NAMESPACE4)

    def test_object_show(self):
        request = unit_test_utils.get_fake_request()
        output = self.object_controller.show(request, NAMESPACE3, OBJECT1)
        self.assertEqual(OBJECT1, output.name)

    def test_object_show_non_existing(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.show, request, NAMESPACE5,
                          OBJECT1)

    def test_object_show_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.show, request, NAMESPACE1,
                          OBJECT1)

    def test_object_show_non_visible_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        output = self.object_controller.show(request, NAMESPACE1, OBJECT1)
        self.assertEqual(OBJECT1, output.name)

    def test_object_delete(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.object_controller.delete(request, NAMESPACE3, OBJECT1)
        self.assertNotificationLog("metadef_object.delete",
                                   [{'name': OBJECT1,
                                     'namespace': NAMESPACE3}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.show, request, NAMESPACE3,
                          OBJECT1)

    def test_object_delete_disabled_notification(self):
        self.config(disabled_notifications=["metadef_object.delete"])
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.object_controller.delete(request, NAMESPACE3, OBJECT1)
        self.assertNotificationsLog([])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.show, request, NAMESPACE3,
                          OBJECT1)

    def test_object_delete_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.object_controller.delete, request,
                          NAMESPACE3, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_delete_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.object_controller.delete(request, NAMESPACE3, OBJECT1)
        self.assertNotificationLog("metadef_object.delete",
                                   [{'name': OBJECT1,
                                     'namespace': NAMESPACE3}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.show, request, NAMESPACE3,
                          OBJECT1)

    def test_object_delete_non_existing(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.delete, request,
                          NAMESPACE5, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_delete_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.delete, request,
                          NAMESPACE4, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_delete_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.delete, request,
                          NAMESPACE1, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_delete_admin_protected(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.object_controller.delete, request,
                          NAMESPACE1, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_create(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = objects.MetadefObject()
        object.name = OBJECT2
        object.required = []
        object.properties = {}
        object = self.object_controller.create(request, object, NAMESPACE1)
        self.assertEqual(OBJECT2, object.name)
        self.assertEqual([], object.required)
        self.assertEqual({}, object.properties)
        self.assertNotificationLog("metadef_object.create",
                                   [{'name': OBJECT2,
                                     'namespace': NAMESPACE1,
                                     'properties': []}])

        object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
        self.assertEqual(OBJECT2, object.name)
        self.assertEqual([], object.required)
        self.assertEqual({}, object.properties)

    def test_object_create_invalid_properties(self):
        request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                                   'Namespace3/'
                                                   'objects',
                                                   roles=['admin'])
        body = {
            "name": "My Object",
            "description": "object1 description.",
            "properties": {
                "property1": {
                    "type": "integer",
                    "title": "property",
                    "description": "property description",
                    "test-key": "test-value",
                }
            }
        }
        request.body = jsonutils.dump_as_bytes(body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.deserializer.create, request)

    def test_object_create_overlimit_name(self):
        request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                                   'Namespace3/'
                                                   'objects',
                                                   roles=['admin'])
        request.body = jsonutils.dump_as_bytes({'name': 'a' * 81})

        exc = self.assertRaises(webob.exc.HTTPBadRequest,
                                self.deserializer.create, request)
        self.assertIn("Failed validating 'maxLength' in "
                      "schema['properties']['name']", exc.explanation)

    def test_object_create_duplicate(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = objects.MetadefObject()
        object.name = 'New-Object'
        object.required = []
        object.properties = {}
        new_obj = self.object_controller.create(request, object, NAMESPACE3)
        self.assertEqual('New-Object', new_obj.name)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.object_controller.create, request, object,
                          NAMESPACE3)

    def test_object_create_conflict(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = objects.MetadefObject()
        object.name = OBJECT1
        object.required = []
        object.properties = {}

        self.assertRaises(webob.exc.HTTPConflict,
                          self.object_controller.create, request, object,
                          NAMESPACE1)
        self.assertNotificationsLog([])
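
    # test_object_create_invalid_properties relies on the object JSON
    # schema rejecting unknown keys inside a property definition: the
    # extra 'test-key' entry alone is enough to make deserialization
    # fail with 400.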

    def test_object_create_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = objects.MetadefObject()
        object.name = '\U0001f693'
        object.required = []
        object.properties = {}

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.object_controller.create, request, object,
                          NAMESPACE1)

    def test_object_create_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = objects.MetadefObject()
        object.name = PROPERTY1
        object.required = []
        object.properties = {}

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.create, request, object,
                          NAMESPACE4)
        self.assertNotificationsLog([])

    def test_object_create_non_visible_namespace(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)

        object = objects.MetadefObject()
        object.name = OBJECT1
        object.required = []
        object.properties = {}

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.create, request, object,
                          NAMESPACE1)
        self.assertNotificationsLog([])

    def test_object_create_non_visible_namespace_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        object = objects.MetadefObject()
        object.name = OBJECT2
        object.required = []
        object.properties = {}
        object = self.object_controller.create(request, object, NAMESPACE1)
        self.assertEqual(OBJECT2, object.name)
        self.assertEqual([], object.required)
        self.assertEqual({}, object.properties)
        self.assertNotificationLog("metadef_object.create",
                                   [{'name': OBJECT2,
                                     'namespace': NAMESPACE1}])

        object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
        self.assertEqual(OBJECT2, object.name)
        self.assertEqual([], object.required)
        self.assertEqual({}, object.properties)

    def test_object_create_missing_properties(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = objects.MetadefObject()
        object.name = OBJECT2
        object.required = []
        object = self.object_controller.create(request, object, NAMESPACE1)
        self.assertEqual(OBJECT2, object.name)
        self.assertEqual([], object.required)
        self.assertNotificationLog("metadef_object.create",
                                   [{'name': OBJECT2,
                                     'namespace': NAMESPACE1,
                                     'properties': []}])

        object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
        self.assertEqual(OBJECT2, object.name)
        self.assertEqual([], object.required)
        self.assertEqual({}, object.properties)

    def test_object_update(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        object = self.object_controller.show(request, NAMESPACE3, OBJECT1)
        object.name = OBJECT1
        object.description = 'description'

        object = self.object_controller.update(request, object, NAMESPACE3,
                                               OBJECT1)
        self.assertEqual(OBJECT1, object.name)
        self.assertEqual('description', object.description)
        self.assertNotificationLog("metadef_object.update", [
            {
                'name': OBJECT1,
                'namespace': NAMESPACE3,
                'description': 'description',
            }
        ])

        property = self.object_controller.show(request, NAMESPACE3, OBJECT1)
        self.assertEqual(OBJECT1, property.name)
        self.assertEqual('description', object.description)

    def test_object_update_name(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = self.object_controller.show(request, NAMESPACE1, OBJECT1)
        object.name = OBJECT2

        object = self.object_controller.update(request, object, NAMESPACE1,
                                               OBJECT1)
        self.assertEqual(OBJECT2, object.name)
        self.assertNotificationLog("metadef_object.update", [
            {
                'name': OBJECT2,
                'name_old': OBJECT1,
                'namespace': NAMESPACE1,
            }
        ])

        object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
        self.assertEqual(OBJECT2, object.name)

    def test_object_update_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        object = self.object_controller.show(request, NAMESPACE1, OBJECT1)
        object.name = '\U0001f693'

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.object_controller.update, request, object,
                          NAMESPACE1, OBJECT1)

    def test_object_update_with_overlimit_name(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        request.body = jsonutils.dump_as_bytes(
            {"properties": {}, "name": "a" * 81, "required": []})

        exc = self.assertRaises(webob.exc.HTTPBadRequest,
                                self.deserializer.update, request)
        self.assertIn("Failed validating 'maxLength' in "
                      "schema['properties']['name']", exc.explanation)

    def test_object_update_conflict(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        object = self.object_controller.show(request, NAMESPACE3, OBJECT1)
        object.name = OBJECT2
        self.assertRaises(webob.exc.HTTPConflict,
                          self.object_controller.update, request, object,
                          NAMESPACE3, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_update_non_existing(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        object = objects.MetadefObject()
        object.name = OBJECT1
        object.required = []
        object.properties = {}

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.update, request, object,
                          NAMESPACE5, OBJECT1)
        self.assertNotificationsLog([])

    def test_object_update_namespace_non_existing(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])

        object = objects.MetadefObject()
        object.name = OBJECT1
        object.required = []
        object.properties = {}

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.object_controller.update, request, object,
                          NAMESPACE4, OBJECT1)
        self.assertNotificationsLog([])

    def test_resource_type_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.rt_controller.index(request)
        self.assertEqual(3, len(output.resource_types))
        actual = set([rtype.name for rtype in output.resource_types])
        expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2, RESOURCE_TYPE4])
        self.assertEqual(expected, actual)

    def test_resource_type_show(self):
        request = unit_test_utils.get_fake_request()
        output = self.rt_controller.show(request, NAMESPACE3)
        self.assertEqual(1, len(output.resource_type_associations))
        actual = set([rt.name for rt in output.resource_type_associations])
        expected = set([RESOURCE_TYPE1])
        self.assertEqual(expected, actual)

    def test_resource_type_show_empty(self):
        request = unit_test_utils.get_fake_request()
        output = self.rt_controller.show(request, NAMESPACE5)
        self.assertEqual(0, len(output.resource_type_associations))

    def test_resource_type_show_non_visible(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.show, request, NAMESPACE2)

    def test_resource_type_show_non_visible_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        output = self.rt_controller.show(request, NAMESPACE2)
        self.assertEqual(2, len(output.resource_type_associations))
        actual = set([rt.name for rt in output.resource_type_associations])
        expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2])
        self.assertEqual(expected, actual)

    def test_resource_type_show_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.show, request, NAMESPACE4)
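
    # Note that RESOURCE_TYPE3 is deliberately never created in
    # _create_resource_types(), so associating a namespace with it is
    # expected to 404; see
    # test_resource_type_association_create_non_existing_resource_type
    # below.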

    def test_resource_type_association_delete(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1)
        self.assertNotificationLog("metadef_resource_type.delete",
                                   [{'name': RESOURCE_TYPE1,
                                     'namespace': NAMESPACE3}])

        output = self.rt_controller.show(request, NAMESPACE3)
        self.assertEqual(0, len(output.resource_type_associations))

    def test_resource_type_association_delete_disabled_notification(self):
        self.config(disabled_notifications=["metadef_resource_type.delete"])
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1)
        self.assertNotificationsLog([])

        output = self.rt_controller.show(request, NAMESPACE3)
        self.assertEqual(0, len(output.resource_type_associations))

    def test_resource_type_association_delete_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.rt_controller.delete, request, NAMESPACE3,
                          RESOURCE_TYPE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_delete_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1)
        self.assertNotificationLog("metadef_resource_type.delete",
                                   [{'name': RESOURCE_TYPE1,
                                     'namespace': NAMESPACE3}])

        output = self.rt_controller.show(request, NAMESPACE3)
        self.assertEqual(0, len(output.resource_type_associations))

    def test_resource_type_association_delete_non_existing(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.delete, request, NAMESPACE1,
                          RESOURCE_TYPE2)
        self.assertNotificationsLog([])

    def test_resource_type_association_delete_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.delete, request, NAMESPACE4,
                          RESOURCE_TYPE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_delete_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.delete, request, NAMESPACE1,
                          RESOURCE_TYPE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_delete_protected_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.rt_controller.delete, request, NAMESPACE1,
                          RESOURCE_TYPE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_create(self):
        request = unit_test_utils.get_fake_request()

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE2
        rt.prefix = 'pref'
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.rt_controller.create, request, rt, NAMESPACE1)

    def test_resource_type_association_create_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE2
        rt.prefix = 'pref'
        rt = self.rt_controller.create(request, rt, NAMESPACE1)
        self.assertEqual(RESOURCE_TYPE2, rt.name)
        self.assertEqual('pref', rt.prefix)
        self.assertNotificationLog("metadef_resource_type.create",
                                   [{'name': RESOURCE_TYPE2,
                                     'namespace': NAMESPACE1}])

        output = self.rt_controller.show(request, NAMESPACE1)
        self.assertEqual(2, len(output.resource_type_associations))
        actual = set([x.name for x in output.resource_type_associations])
        expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2])
        self.assertEqual(expected, actual)

    def test_resource_type_association_create_conflict(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE1
        rt.prefix = 'pref'
        self.assertRaises(webob.exc.HTTPConflict,
                          self.rt_controller.create, request, rt, NAMESPACE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_create_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE1
        rt.prefix = 'pref'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.create, request, rt, NAMESPACE4)
        self.assertNotificationsLog([])

    def test_resource_type_association_create_non_existing_resource_type(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE3
        rt.prefix = 'pref'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.create, request, rt, NAMESPACE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_create_non_visible_namespace(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE2
        rt.prefix = 'pref'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.rt_controller.create, request, rt, NAMESPACE1)
        self.assertNotificationsLog([])

    def test_resource_type_association_create_non_visible_namesp_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        rt = resource_types.ResourceTypeAssociation()
        rt.name = RESOURCE_TYPE2
        rt.prefix = 'pref'
        rt = self.rt_controller.create(request, rt, NAMESPACE1)
        self.assertEqual(RESOURCE_TYPE2, rt.name)
        self.assertEqual('pref', rt.prefix)
        self.assertNotificationLog("metadef_resource_type.create",
                                   [{'name': RESOURCE_TYPE2,
                                     'namespace': NAMESPACE1}])

        output = self.rt_controller.show(request, NAMESPACE1)
        self.assertEqual(2, len(output.resource_type_associations))
        actual = set([x.name for x in output.resource_type_associations])
        expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2])
        self.assertEqual(expected, actual)

    def test_tag_index(self):
        request = unit_test_utils.get_fake_request()
        output = self.tag_controller.index(request, NAMESPACE3)
        output = output.to_dict()
        self.assertEqual(2, len(output['tags']))
        actual = set([tag.name for tag in output['tags']])
        expected = set([TAG1, TAG2])
        self.assertEqual(expected, actual)

    def test_tag_index_empty(self):
        request = unit_test_utils.get_fake_request()
        output = self.tag_controller.index(request, NAMESPACE5)
        output = output.to_dict()
        self.assertEqual(0, len(output['tags']))

    def test_tag_index_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.index, request, NAMESPACE4)

    def test_tag_show(self):
        request = unit_test_utils.get_fake_request()
        output = self.tag_controller.show(request, NAMESPACE3, TAG1)
        self.assertEqual(TAG1, output.name)

    def test_tag_show_non_existing(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.show, request, NAMESPACE5,
                          TAG1)

    def test_tag_show_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.show, request, NAMESPACE1,
                          TAG1)

    def test_tag_show_non_visible_admin(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                                   roles=['admin'])

        output = self.tag_controller.show(request, NAMESPACE1, TAG1)
        self.assertEqual(TAG1, output.name)

    def test_tag_delete(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.tag_controller.delete(request, NAMESPACE3, TAG1)
        self.assertNotificationLog("metadef_tag.delete",
                                   [{'name': TAG1,
                                     'namespace': NAMESPACE3}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.show, request, NAMESPACE3,
                          TAG1)

    def test_tag_delete_disabled_notification(self):
        self.config(disabled_notifications=["metadef_tag.delete"])
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        self.tag_controller.delete(request, NAMESPACE3, TAG1)
        self.assertNotificationsLog([])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.show, request, NAMESPACE3,
                          TAG1)

    def test_tag_delete_other_owner(self):
        request = unit_test_utils.get_fake_request()
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.tag_controller.delete, request, NAMESPACE3,
                          TAG1)
        self.assertNotificationsLog([])

    def test_tag_delete_other_owner_admin(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.tag_controller.delete(request, NAMESPACE3, TAG1)
        self.assertNotificationLog("metadef_tag.delete",
                                   [{'name': TAG1,
                                     'namespace': NAMESPACE3}])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.show, request, NAMESPACE3,
                          TAG1)

    def test_tag_delete_non_existing(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.delete, request, NAMESPACE5,
                          TAG1)
        self.assertNotificationsLog([])

    def test_tag_delete_non_existing_namespace(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.delete, request, NAMESPACE4,
                          TAG1)
        self.assertNotificationsLog([])

    def test_tag_delete_non_visible(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT2)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.delete, request, NAMESPACE1,
                          TAG1)
        self.assertNotificationsLog([])

    def test_tag_delete_admin_protected(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.tag_controller.delete, request, NAMESPACE1,
                          TAG1)
        self.assertNotificationsLog([])

    def test_tag_create(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        tag = self.tag_controller.create(request, NAMESPACE1, TAG2)
        self.assertEqual(TAG2, tag.name)
        self.assertNotificationLog("metadef_tag.create",
                                   [{'name': TAG2,
                                     'namespace': NAMESPACE1}])

        tag = self.tag_controller.show(request, NAMESPACE1, TAG2)
        self.assertEqual(TAG2, tag.name)

    def test_tag_create_overlimit_name(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        exc = self.assertRaises(webob.exc.HTTPBadRequest,
                                self.tag_controller.create, request,
                                NAMESPACE1, 'a' * 81)
        self.assertIn("Failed validating 'maxLength' in "
                      "schema['properties']['name']", exc.explanation)

    def test_tag_create_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.tag_controller.create, request, NAMESPACE1,
                          '\U0001f693')

    def test_tag_create_tags(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])

        metadef_tags = tags.MetadefTags()
        metadef_tags.tags = _db_tags_fixture()
        output = self.tag_controller.create_tags(
            request, metadef_tags, NAMESPACE1)
        output = output.to_dict()
        self.assertEqual(3, len(output['tags']))
        actual = set([tag.name for tag in output['tags']])
        expected = set([TAG1, TAG2, TAG3])
        self.assertEqual(expected, actual)
        self.assertNotificationLog(
            "metadef_tag.create",
            [
                {'name': TAG1, 'namespace': NAMESPACE1},
                {'name': TAG2, 'namespace': NAMESPACE1},
                {'name': TAG3, 'namespace': NAMESPACE1},
            ]
        )
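
    # The X-Openstack-Append header steers create_tags(): when it is
    # truthy the posted tags are appended to the namespace's existing
    # set, otherwise (false or absent) they replace it, as the final
    # index() counts in the two tests below demonstrate.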
unit_test_utils.get_fake_request( headers={'X-Openstack-Append': 'True'}, roles=['admin']) metadef_tags = tags.MetadefTags() # As TAG1 is already created in setup, just creating other two tags. metadef_tags.tags = _db_tags_fixture([TAG2, TAG3]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG2, TAG3]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG2, 'namespace': NAMESPACE1}, {'name': TAG3, 'namespace': NAMESPACE1}, ] ) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG4, TAG5]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG4, 'namespace': NAMESPACE1}, {'name': TAG5, 'namespace': NAMESPACE1}, ] ) output = self.tag_controller.index(request, NAMESPACE1) output = output.to_dict() self.assertEqual(5, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2, TAG3, TAG4, TAG5]) self.assertEqual(expected, actual) def test_tag_create_tags_with_append_false(self): request = unit_test_utils.get_fake_request( headers={'X-Openstack-Append': 'False'}, roles=['admin']) metadef_tags = tags.MetadefTags() # As TAG1 is already created in setup, just creating other two tags. metadef_tags.tags = _db_tags_fixture([TAG2, TAG3]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG2, TAG3]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG2, 'namespace': NAMESPACE1}, {'name': TAG3, 'namespace': NAMESPACE1}, ] ) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG4, TAG5]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG4, 'namespace': NAMESPACE1}, {'name': TAG5, 'namespace': NAMESPACE1}, ] ) output = self.tag_controller.index(request, NAMESPACE1) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG4, TAG5]) self.assertEqual(expected, actual) def test_tag_create_duplicate_tags(self): request = unit_test_utils.get_fake_request(roles=['admin']) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) self.assertRaises( webob.exc.HTTPConflict, self.tag_controller.create_tags, request, metadef_tags, NAMESPACE1) self.assertNotificationsLog([]) def test_tag_create_duplicate_with_pre_existing_tags(self): request = unit_test_utils.get_fake_request(roles=['admin']) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG1, TAG2, TAG3]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(3, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected 
= set([TAG1, TAG2, TAG3]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG1, 'namespace': NAMESPACE1}, {'name': TAG2, 'namespace': NAMESPACE1}, {'name': TAG3, 'namespace': NAMESPACE1}, ] ) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) self.assertRaises( webob.exc.HTTPConflict, self.tag_controller.create_tags, request, metadef_tags, NAMESPACE1) self.assertNotificationsLog([]) output = self.tag_controller.index(request, NAMESPACE1) output = output.to_dict() self.assertEqual(3, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2, TAG3]) self.assertEqual(expected, actual) def test_tag_create_duplicate_with_pre_existing_tags_with_append(self): request = unit_test_utils.get_fake_request( headers={'X-Openstack-Append': 'True'}, roles=['admin']) metadef_tags = tags.MetadefTags() # As TAG1 is already created in setup, just creating other two tags. metadef_tags.tags = _db_tags_fixture([TAG2, TAG3]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG2, TAG3]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG2, 'namespace': NAMESPACE1}, {'name': TAG3, 'namespace': NAMESPACE1}, ] ) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) self.assertRaises( webob.exc.HTTPConflict, self.tag_controller.create_tags, request, metadef_tags, NAMESPACE1) self.assertNotificationsLog([]) output = self.tag_controller.index(request, NAMESPACE1) output = output.to_dict() self.assertEqual(3, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2, TAG3]) self.assertEqual(expected, actual) def test_tag_create_conflict(self): request = unit_test_utils.get_fake_request(roles=['admin']) self.assertRaises(webob.exc.HTTPConflict, self.tag_controller.create, request, NAMESPACE1, TAG1) self.assertNotificationsLog([]) def test_tag_create_non_existing_namespace(self): request = unit_test_utils.get_fake_request(roles=['admin']) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.create, request, NAMESPACE4, TAG1) self.assertNotificationsLog([]) def test_tag_create_non_visible_namespace(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.create, request, NAMESPACE1, TAG1) self.assertNotificationsLog([]) def test_tag_create_non_visible_namespace_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, roles=['admin']) tag = self.tag_controller.create(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) self.assertNotificationLog("metadef_tag.create", [{'name': TAG2, 'namespace': NAMESPACE1}]) tag = self.tag_controller.show(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) def test_tag_update(self): request = unit_test_utils.get_fake_request(tenant=TENANT3, roles=['admin']) tag = self.tag_controller.show(request, NAMESPACE3, TAG1) tag.name = TAG3 tag = self.tag_controller.update(request, tag, NAMESPACE3, TAG1) self.assertEqual(TAG3, tag.name) self.assertNotificationLog("metadef_tag.update", [ {'name': TAG3, 'namespace': NAMESPACE3} ]) property = self.tag_controller.show(request, NAMESPACE3, TAG3) self.assertEqual(TAG3, property.name) def test_tag_update_name(self): request = 
unit_test_utils.get_fake_request(roles=['admin'])
        tag = self.tag_controller.show(request, NAMESPACE1, TAG1)
        tag.name = TAG2
        tag = self.tag_controller.update(request, tag, NAMESPACE1, TAG1)
        self.assertEqual(TAG2, tag.name)
        self.assertNotificationLog("metadef_tag.update", [
            {'name': TAG2, 'name_old': TAG1, 'namespace': NAMESPACE1}
        ])
        tag = self.tag_controller.show(request, NAMESPACE1, TAG2)
        self.assertEqual(TAG2, tag.name)

    def test_tag_update_with_4byte_character(self):
        request = unit_test_utils.get_fake_request(roles=['admin'])
        tag = self.tag_controller.show(request, NAMESPACE1, TAG1)
        tag.name = '\U0001f693'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.tag_controller.update, request, tag,
                          NAMESPACE1, TAG1)

    def test_tag_update_with_name_overlimit(self):
        request = unit_test_utils.get_fake_request()
        request.body = jsonutils.dump_as_bytes(
            {"properties": {}, "name": "a" * 81, "required": []})
        exc = self.assertRaises(webob.exc.HTTPBadRequest,
                                self.deserializer.update, request)
        self.assertIn("Failed validating 'maxLength' in "
                      "schema['properties']['name']", exc.explanation)

    def test_tag_update_conflict(self):
        request = unit_test_utils.get_fake_request(tenant=TENANT3,
                                                   roles=['admin'])
        tag = self.tag_controller.show(request, NAMESPACE3, TAG1)
        tag.name = TAG2
        self.assertRaises(webob.exc.HTTPConflict,
                          self.tag_controller.update, request, tag,
                          NAMESPACE3, TAG1)
        self.assertNotificationsLog([])

    def test_tag_update_non_existing(self):
        request = unit_test_utils.get_fake_request(
            tenant=TENANT3, roles=['admin'])
        tag = tags.MetadefTag()
        tag.name = TAG1
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.update, request, tag,
                          NAMESPACE5, TAG1)
        self.assertNotificationsLog([])

    def test_tag_update_namespace_non_existing(self):
        request = unit_test_utils.get_fake_request(
            tenant=TENANT3, roles=['admin'])
        tag = tags.MetadefTag()
        tag.name = TAG1
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.tag_controller.update, request, tag,
                          NAMESPACE4, TAG1)
        self.assertNotificationsLog([])


class TestMetadefNamespaceResponseSerializers(base.IsolatedUnitTest):

    def setUp(self):
        super(TestMetadefNamespaceResponseSerializers, self).setUp()
        self.serializer = namespaces.ResponseSerializer(schema={})
        self.response = mock.Mock()
        self.result = mock.Mock()

    def test_delete_tags(self):
        self.serializer.delete_tags(self.response, self.result)
        self.assertEqual(204, self.response.status_int)


# ---- glance-29.0.0/glance/tests/unit/v2/test_schemas_resource.py ----

# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
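# Illustrative sketch (editorial addition, hedged): the controller exercised
# below returns plain JSON-schema documents as Python dicts. Assuming the
# standard `jsonschema` package is available, such a document can be used to
# validate a hypothetical payload; nothing in the tests depends on this
# helper, and all names here are examples only.
import jsonschema


def _example_validate_payload(schema_doc, payload):
    """Raise jsonschema.exceptions.ValidationError on a bad payload.

    Example (hypothetical): _example_validate_payload(
        controller.image(req), {'name': 'cirros', 'tags': []})
    """
    jsonschema.validate(payload, schema_doc)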
import glance.api.v2.schemas
import glance.db.sqlalchemy.api as db_api
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils


class TestSchemasController(test_utils.BaseTestCase):

    def setUp(self):
        super(TestSchemasController, self).setUp()
        self.controller = glance.api.v2.schemas.Controller()

    def test_image(self):
        req = unit_test_utils.get_fake_request()
        output = self.controller.image(req)
        self.assertEqual('image', output['name'])
        expected = set(['status', 'name', 'tags', 'checksum', 'created_at',
                        'disk_format', 'updated_at', 'visibility', 'self',
                        'file', 'container_format', 'schema', 'id', 'size',
                        'direct_url', 'min_ram', 'min_disk', 'protected',
                        'locations', 'owner', 'virtual_size', 'os_hidden',
                        'os_hash_algo', 'os_hash_value', 'stores'])
        self.assertEqual(expected, set(output['properties'].keys()))

    def test_image_has_correct_statuses(self):
        req = unit_test_utils.get_fake_request()
        output = self.controller.image(req)
        self.assertEqual('image', output['name'])
        expected_statuses = set(db_api.STATUSES)
        actual_statuses = set(output['properties']['status']['enum'])
        self.assertEqual(expected_statuses, actual_statuses)

    def test_images(self):
        req = unit_test_utils.get_fake_request()
        output = self.controller.images(req)
        self.assertEqual('images', output['name'])
        expected = set(['images', 'schema', 'first', 'next'])
        self.assertEqual(expected, set(output['properties'].keys()))
        expected = set(['{schema}', '{first}', '{next}'])
        actual = set([link['href'] for link in output['links']])
        self.assertEqual(expected, actual)

    def test_member(self):
        req = unit_test_utils.get_fake_request()
        output = self.controller.member(req)
        self.assertEqual('member', output['name'])
        expected = set(['status', 'created_at', 'updated_at', 'image_id',
                        'member_id', 'schema'])
        self.assertEqual(expected, set(output['properties'].keys()))

    def test_members(self):
        req = unit_test_utils.get_fake_request()
        output = self.controller.members(req)
        self.assertEqual('members', output['name'])
        expected = set(['schema', 'members'])
        self.assertEqual(expected, set(output['properties'].keys()))


# ---- glance-29.0.0/glance/tests/unit/v2/test_tasks_resource.py ----

# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
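# Illustrative sketch (editorial addition, hedged): the fixtures below set
# `expires_at` explicitly, while the live executor derives it from the
# `task_time_to_live` option, which is expressed in hours (see
# test_create_with_live_time further down). A minimal sketch of that
# arithmetic; the 48-hour default is an assumption for illustration only.
import datetime as _dt_example


def _example_task_expiry(updated_at, ttl_hours=48):
    """Return the moment a finished task should expire."""
    return updated_at + _dt_example.timedelta(hours=ttl_hours)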
import datetime import http.client as http from unittest import mock import uuid from oslo_config import cfg from oslo_serialization import jsonutils import webob import glance.api.v2.tasks from glance.common import timeutils import glance.domain import glance.gateway from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' DATETIME = datetime.datetime(2013, 9, 28, 15, 27, 36, 325355) ISOTIME = '2013-09-28T15:27:36Z' def _db_fixture(task_id, **kwargs): default_datetime = timeutils.utcnow() obj = { 'id': task_id, 'status': 'pending', 'type': 'import', 'input': {}, 'result': None, 'owner': None, 'image_id': 'fake_image_id', 'user_id': 'fake_user', 'request_id': 'fake_request_id', 'message': None, 'expires_at': default_datetime + datetime.timedelta(days=365), 'created_at': default_datetime, 'updated_at': default_datetime, 'deleted_at': None, 'deleted': False } obj.update(kwargs) return obj def _domain_fixture(task_id, **kwargs): default_datetime = timeutils.utcnow() task_properties = { 'task_id': task_id, 'status': kwargs.get('status', 'pending'), 'task_type': kwargs.get('type', 'import'), 'owner': kwargs.get('owner'), 'expires_at': kwargs.get('expires_at'), 'created_at': kwargs.get('created_at', default_datetime), 'updated_at': kwargs.get('updated_at', default_datetime), 'task_input': kwargs.get('task_input', {}), 'message': kwargs.get('message'), 'result': kwargs.get('result'), 'image_id': kwargs.get('image_id'), 'user_id': kwargs.get('user_id'), 'request_id': kwargs.get('request_id'), } task = glance.domain.Task(**task_properties) return task CONF = cfg.CONF CONF.import_opt('task_time_to_live', 'glance.common.config', group='task') class TestTasksController(test_utils.BaseTestCase): def setUp(self): super(TestTasksController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() self._create_tasks() self.controller = glance.api.v2.tasks.TasksController(self.db, self.policy, self.notifier, self.store) self.gateway = glance.gateway.Gateway(self.db, self.store, self.notifier, self.policy) def _create_tasks(self): now = timeutils.utcnow() times = [now + datetime.timedelta(seconds=5 * i) for i in range(4)] self.tasks = [ _db_fixture(UUID1, owner=TENANT1, created_at=times[0], updated_at=times[0]), # FIXME(venkatesh): change the type to include clone and export # once they are included as a valid types under Task domain model. 
_db_fixture(UUID2, owner=TENANT2, type='import', created_at=times[1], updated_at=times[1]), _db_fixture(UUID3, owner=TENANT3, type='import', created_at=times[2], updated_at=times[2]), _db_fixture(UUID4, owner=TENANT4, type='import', created_at=times[3], updated_at=times[3])] [self.db.task_create(None, task) for task in self.tasks] def test_index(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request) self.assertEqual(4, len(output['tasks'])) def test_index_return_parameters(self): self.config(limit_param_default=1, api_limit_max=4) request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, marker=UUID3, limit=1, sort_key='created_at', sort_dir='desc') self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID2]) self.assertEqual(expected, actual) self.assertEqual(UUID2, output['next_marker']) def test_index_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, marker=UUID3, limit=2) self.assertEqual(2, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID2, UUID1]) self.assertEqual(expected, actual) self.assertEqual(UUID1, output['next_marker']) def test_index_no_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, marker=UUID1, limit=2) self.assertEqual(0, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([]) self.assertEqual(expected, actual) self.assertNotIn('next_marker', output) def test_index_with_id_filter(self): request = unit_test_utils.get_fake_request('/tasks?id=%s' % UUID1) output = self.controller.index(request, filters={'id': UUID1}) self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_with_filters_return_many(self): path = '/tasks?status=pending' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, filters={'status': 'pending'}) self.assertEqual(4, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1, UUID2, UUID3, UUID4]) self.assertEqual(sorted(expected), sorted(actual)) def test_index_with_many_filters(self): url = '/tasks?status=pending&type=import' request = unit_test_utils.get_fake_request(url, is_admin=True) output = self.controller.index(request, filters={ 'status': 'pending', 'type': 'import', 'owner': TENANT1, }) self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_with_marker(self): self.config(limit_param_default=1, api_limit_max=3) path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, marker=UUID3) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(1, len(actual)) 
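        # Editorial aside (hedged; `controller` and `request` stand for the
        # names used above): the next_marker contract exercised by these
        # tests supports a simple client-side pagination loop, sketched as:
        #
        #     marker = None
        #     while True:
        #         page = controller.index(request, marker=marker, limit=2)
        #         if not page['tasks'] or 'next_marker' not in page:
        #             break
        #         marker = page['next_marker']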
self.assertIn(UUID2, actual) def test_index_with_limit(self): path = '/tasks' limit = 2 request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, limit=limit) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(limit, len(actual)) def test_index_greater_than_limit_max(self): self.config(limit_param_default=1, api_limit_max=3) path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, limit=4) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(3, len(actual)) self.assertNotIn(output['next_marker'], output) def test_index_default_limit(self): self.config(limit_param_default=1, api_limit_max=3) path = '/tasks' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(1, len(actual)) def test_index_with_sort_dir(self): path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, sort_dir='asc', limit=3) actual = [task.task_id for task in output['tasks']] self.assertEqual(3, len(actual)) self.assertEqual([UUID1, UUID2, UUID3], actual) def test_index_with_sort_key(self): path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, sort_key='created_at', limit=3) actual = [task.task_id for task in output['tasks']] self.assertEqual(3, len(actual)) self.assertEqual(UUID4, actual[0]) self.assertEqual(UUID3, actual[1]) self.assertEqual(UUID2, actual[2]) def test_index_with_marker_not_found(self): fake_uuid = str(uuid.uuid4()) path = '/tasks' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=fake_uuid) def test_index_with_marker_is_not_like_uuid(self): marker = 'INVALID_UUID' path = '/tasks' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=marker) def test_index_invalid_sort_key(self): path = '/tasks' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, sort_key='foo') def test_index_zero_tasks(self): self.db.reset() request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual([], output['tasks']) def test_get(self): request = unit_test_utils.get_fake_request() task = self.controller.get(request, task_id=UUID1) self.assertEqual(UUID1, task.task_id) self.assertEqual('import', task.type) def test_get_non_existent(self): request = unit_test_utils.get_fake_request() task_id = str(uuid.uuid4()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.get, request, task_id) def test_get_not_allowed(self): request = unit_test_utils.get_fake_request() self.assertEqual(TENANT1, request.context.project_id) self.assertRaises(webob.exc.HTTPNotFound, self.controller.get, request, UUID4) @mock.patch('glance.api.common.get_thread_pool') @mock.patch.object(glance.gateway.Gateway, 'get_task_factory') @mock.patch.object(glance.gateway.Gateway, 'get_task_executor_factory') @mock.patch.object(glance.gateway.Gateway, 'get_task_repo') def test_create(self, mock_get_task_repo, mock_get_task_executor_factory, mock_get_task_factory, mock_get_thread_pool): # setup request = unit_test_utils.get_fake_request() task = { "type": "import", "input": { "import_from": 
"swift://cloud.foo/myaccount/mycontainer/path", "import_from_format": "qcow2", "image_properties": {} } } get_task_factory = mock.Mock() mock_get_task_factory.return_value = get_task_factory new_task = mock.Mock() get_task_factory.new_task.return_value = new_task new_task.run.return_value = mock.ANY get_task_executor_factory = mock.Mock() mock_get_task_executor_factory.return_value = get_task_executor_factory get_task_executor_factory.new_task_executor.return_value = mock.Mock() get_task_repo = mock.Mock() mock_get_task_repo.return_value = get_task_repo get_task_repo.add.return_value = mock.Mock() # call self.controller.create(request, task=task) # assert self.assertEqual(1, get_task_factory.new_task.call_count) self.assertEqual(1, get_task_repo.add.call_count) self.assertEqual( 1, get_task_executor_factory.new_task_executor.call_count) # Make sure that we spawned the task's run method mock_get_thread_pool.assert_called_once_with('tasks_pool') mock_get_thread_pool.return_value.spawn.assert_called_once_with( new_task.run, get_task_executor_factory.new_task_executor.return_value) @mock.patch('glance.common.scripts.utils.get_image_data_iter') @mock.patch('glance.common.scripts.utils.validate_location_uri') def test_create_with_live_time(self, mock_validate_location_uri, mock_get_image_data_iter): self.skipTest("Something wrong, this test touches registry") request = unit_test_utils.get_fake_request() task = { "type": "import", "input": { "import_from": "http://download.cirros-cloud.net/0.3.4/" "cirros-0.3.4-x86_64-disk.img", "import_from_format": "qcow2", "image_properties": { "disk_format": "qcow2", "container_format": "bare", "name": "test-task" } } } new_task = self.controller.create(request, task=task) executor_factory = self.gateway.get_task_executor_factory( request.context) task_executor = executor_factory.new_task_executor(request.context) task_executor.begin_processing(new_task.task_id) success_task = self.controller.get(request, new_task.task_id) # ignore second and microsecond to avoid flaky runs task_live_time = (success_task.expires_at.replace(second=0, microsecond=0) - success_task.updated_at.replace(second=0, microsecond=0)) task_live_time_hour = (task_live_time.days * 24 + task_live_time.seconds / 3600) self.assertEqual(CONF.task.task_time_to_live, task_live_time_hour) def test_create_with_wrong_import_form(self): request = unit_test_utils.get_fake_request() wrong_import_from = [ "swift://cloud.foo/myaccount/mycontainer/path", "file:///path", "cinder://volume-id" ] executor_factory = self.gateway.get_task_executor_factory( request.context) task_repo = self.gateway.get_task_repo(request.context) for import_from in wrong_import_from: task = { "type": "import", "input": { "import_from": import_from, "import_from_format": "qcow2", "image_properties": { "disk_format": "qcow2", "container_format": "bare", "name": "test-task" } } } new_task = self.controller.create(request, task=task) task_executor = executor_factory.new_task_executor(request.context) task_executor.begin_processing(new_task.task_id) final_task = task_repo.get(new_task.task_id) self.assertEqual('failure', final_task.status) if import_from.startswith("file:///"): msg = ("File based imports are not allowed. Please use a " "non-local source of image data.") else: supported = ['http', ] msg = ("The given uri is not valid. 
Please specify a " "valid uri from the following list of supported uri " "%(supported)s") % {'supported': supported} self.assertEqual(msg, final_task.message) def test_create_with_properties_missed(self): request = unit_test_utils.get_fake_request() executor_factory = self.gateway.get_task_executor_factory( request.context) task_repo = self.gateway.get_task_repo(request.context) task = { "type": "import", "input": { "import_from": "swift://cloud.foo/myaccount/mycontainer/path", "import_from_format": "qcow2", } } new_task = self.controller.create(request, task=task) task_executor = executor_factory.new_task_executor(request.context) task_executor.begin_processing(new_task.task_id) final_task = task_repo.get(new_task.task_id) self.assertEqual('failure', final_task.status) msg = "Input does not contain 'image_properties' field" self.assertEqual(msg, final_task.message) @mock.patch.object(glance.gateway.Gateway, 'get_task_factory') def test_notifications_on_create(self, mock_get_task_factory): request = unit_test_utils.get_fake_request() new_task = mock.MagicMock(type='import') mock_get_task_factory.new_task.return_value = new_task new_task.run.return_value = mock.ANY task = {"type": "import", "input": { "import_from": "http://cloud.foo/myaccount/mycontainer/path", "import_from_format": "qcow2", "image_properties": {} } } task = self.controller.create(request, task=task) output_logs = [nlog for nlog in self.notifier.get_logs() if nlog['event_type'] == 'task.create'] self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.create', output_log['event_type']) class TestTasksControllerPolicies(base.IsolatedUnitTest): def setUp(self): super(TestTasksControllerPolicies, self).setUp() self.db = unit_test_utils.FakeDB() self.policy = unit_test_utils.FakePolicyEnforcer() self.controller = glance.api.v2.tasks.TasksController(self.db, self.policy) def test_access_get_unauthorized(self): rules = {"tasks_api_access": False, "get_task": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get, request, task_id=UUID2) def test_delete(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPMethodNotAllowed, self.controller.delete, request, 'fake_id') def test_access_delete_unauthorized(self): rules = {"tasks_api_access": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, 'fake_id') class TestTasksDeserializerPolicies(test_utils.BaseTestCase): # NOTE(rosmaita): this is a bit weird, but we check the access # policy in the RequestDeserializer for calls that take bodies # or query strings because we want to make sure the failure is # a 403, not a 400 due to bad request format def setUp(self): super(TestTasksDeserializerPolicies, self).setUp() self.policy = unit_test_utils.FakePolicyEnforcer() self.deserializer = glance.api.v2.tasks.RequestDeserializer( schema=None, policy_engine=self.policy) bad_path = '/tasks?limit=NaN' def test_access_index_authorized_bad_query_string(self): """Allow access, fail with 400""" rules = {"tasks_api_access": True, "get_tasks": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(self.bad_path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_access_index_unauthorized(self): """Disallow access with bad request, fail 
with 403""" rules = {"tasks_api_access": False, "get_tasks": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(self.bad_path) self.assertRaises(webob.exc.HTTPForbidden, self.deserializer.index, request) bad_task = {'typo': 'import', 'input': {"import_from": "fake"}} def test_access_create_authorized_bad_format(self): """Allow access, fail with 400""" rules = {"tasks_api_access": True, "add_task": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes(self.bad_task) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_access_create_unauthorized(self): """Disallow access with bad request, fail with 403""" rules = {"tasks_api_access": False, "add_task": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes(self.bad_task) self.assertRaises(webob.exc.HTTPForbidden, self.deserializer.create, request) class TestTasksDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestTasksDeserializer, self).setUp() self.policy = unit_test_utils.FakePolicyEnforcer() self.deserializer = glance.api.v2.tasks.RequestDeserializer( policy_engine=self.policy) def test_create_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'type': 'import', 'input': {'import_from': 'swift://cloud.foo/myaccount/mycontainer/path', 'import_from_format': 'qcow2', 'image_properties': {'name': 'fake1'}}, }) output = self.deserializer.create(request) properties = { 'type': 'import', 'input': {'import_from': 'swift://cloud.foo/myaccount/mycontainer/path', 'import_from_format': 'qcow2', 'image_properties': {'name': 'fake1'}}, } self.maxDiff = None expected = {'task': properties} self.assertEqual(expected, output) def test_index(self): marker = str(uuid.uuid4()) path = '/tasks?limit=1&marker=%s' % marker request = unit_test_utils.get_fake_request(path) expected = {'limit': 1, 'marker': marker, 'sort_key': 'created_at', 'sort_dir': 'desc', 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_strip_params_from_filters(self): type = 'import' path = '/tasks?type=%s' % type request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(type, output['filters']['type']) def test_index_with_many_filter(self): status = 'success' type = 'import' path = '/tasks?status=%(status)s&type=%(type)s' % {'status': status, 'type': type} request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(status, output['filters']['status']) self.assertEqual(type, output['filters']['type']) def test_index_with_filter_and_limit(self): status = 'success' path = '/tasks?status=%s&limit=1' % status request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(status, output['filters']['status']) self.assertEqual(1, output['limit']) def test_index_non_integer_limit(self): request = unit_test_utils.get_fake_request('/tasks?limit=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_zero_limit(self): request = unit_test_utils.get_fake_request('/tasks?limit=0') expected = {'limit': 0, 'sort_key': 'created_at', 'sort_dir': 'desc', 'filters': {}} output 
= self.deserializer.index(request) self.assertEqual(expected, output) def test_index_negative_limit(self): path = '/tasks?limit=-1' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_fraction(self): request = unit_test_utils.get_fake_request('/tasks?limit=1.1') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_invalid_status(self): path = '/tasks?status=blah' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_marker(self): marker = str(uuid.uuid4()) path = '/tasks?marker=%s' % marker request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(marker, output.get('marker')) def test_index_marker_not_specified(self): request = unit_test_utils.get_fake_request('/tasks') output = self.deserializer.index(request) self.assertNotIn('marker', output) def test_index_limit_not_specified(self): request = unit_test_utils.get_fake_request('/tasks') output = self.deserializer.index(request) self.assertNotIn('limit', output) def test_index_sort_key_id(self): request = unit_test_utils.get_fake_request('/tasks?sort_key=id') output = self.deserializer.index(request) expected = { 'sort_key': 'id', 'sort_dir': 'desc', 'filters': {} } self.assertEqual(expected, output) def test_index_sort_dir_asc(self): request = unit_test_utils.get_fake_request('/tasks?sort_dir=asc') output = self.deserializer.index(request) expected = { 'sort_key': 'created_at', 'sort_dir': 'asc', 'filters': {}} self.assertEqual(expected, output) def test_index_sort_dir_bad_value(self): request = unit_test_utils.get_fake_request('/tasks?sort_dir=invalid') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) class TestTasksSerializer(test_utils.BaseTestCase): def setUp(self): super(TestTasksSerializer, self).setUp() self.serializer = glance.api.v2.tasks.ResponseSerializer() self.fixtures = [ _domain_fixture(UUID1, type='import', status='pending', task_input={'loc': 'fake'}, result={}, owner=TENANT1, image_id='fake_image_id', user_id='fake_user', request_id='fake_request_id', message='', created_at=DATETIME, updated_at=DATETIME), _domain_fixture(UUID2, type='import', status='processing', task_input={'loc': 'bake'}, owner=TENANT2, image_id='fake_image_id', user_id='fake_user', request_id='fake_request_id', message='', created_at=DATETIME, updated_at=DATETIME, result={}), _domain_fixture(UUID3, type='import', status='success', task_input={'loc': 'foo'}, owner=TENANT3, image_id='fake_image_id', user_id='fake_user', request_id='fake_request_id', message='', created_at=DATETIME, updated_at=DATETIME, result={}, expires_at=DATETIME), _domain_fixture(UUID4, type='import', status='failure', task_input={'loc': 'boo'}, owner=TENANT4, image_id='fake_image_id', user_id='fake_user', request_id='fake_request_id', message='', created_at=DATETIME, updated_at=DATETIME, result={}, expires_at=DATETIME), ] def test_index(self): expected = { 'tasks': [ { 'id': UUID1, 'type': 'import', 'status': 'pending', 'owner': TENANT1, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID1, 'schema': '/v2/schemas/task', }, { 'id': UUID2, 'type': 'import', 'status': 'processing', 'owner': TENANT2, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID2, 'schema': '/v2/schemas/task', }, { 'id': UUID3, 'type': 'import', 'status': 'success', 
'owner': TENANT3, 'expires_at': ISOTIME, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID3, 'schema': '/v2/schemas/task', }, { 'id': UUID4, 'type': 'import', 'status': 'failure', 'owner': TENANT4, 'expires_at': ISOTIME, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID4, 'schema': '/v2/schemas/task', }, ], 'first': '/v2/tasks', 'schema': '/v2/schemas/tasks', } request = webob.Request.blank('/v2/tasks') response = webob.Response(request=request) task_fixtures = [f for f in self.fixtures] result = {'tasks': task_fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_index_next_marker(self): request = webob.Request.blank('/v2/tasks') response = webob.Response(request=request) task_fixtures = [f for f in self.fixtures] result = {'tasks': task_fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) self.assertEqual('/v2/tasks?marker=%s' % UUID2, output['next']) def test_index_carries_query_parameters(self): url = '/v2/tasks?limit=10&sort_key=id&sort_dir=asc' request = webob.Request.blank(url) response = webob.Response(request=request) task_fixtures = [f for f in self.fixtures] result = {'tasks': task_fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) expected_url = '/v2/tasks?limit=10&sort_dir=asc&sort_key=id' self.assertEqual(unit_test_utils.sort_url_by_qs_keys(expected_url), unit_test_utils.sort_url_by_qs_keys(output['first'])) expect_next = '/v2/tasks?limit=10&marker=%s&sort_dir=asc&sort_key=id' self.assertEqual(unit_test_utils.sort_url_by_qs_keys( expect_next % UUID2), unit_test_utils.sort_url_by_qs_keys(output['next'])) def test_get(self): expected = { 'id': UUID4, 'type': 'import', 'status': 'failure', 'input': {'loc': 'boo'}, 'result': {}, 'owner': TENANT4, 'message': '', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'expires_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID4, 'schema': '/v2/schemas/task', 'image_id': 'fake_image_id', 'user_id': 'fake_user', 'request_id': 'fake_request_id', } response = webob.Response() self.serializer.get(response, self.fixtures[3]) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_get_ensure_expires_at_not_returned(self): expected = { 'id': UUID1, 'type': 'import', 'status': 'pending', 'input': {'loc': 'fake'}, 'result': {}, 'owner': TENANT1, 'message': '', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID1, 'schema': '/v2/schemas/task', 'image_id': 'fake_image_id', 'user_id': 'fake_user', 'request_id': 'fake_request_id', } response = webob.Response() self.serializer.get(response, self.fixtures[0]) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) expected = { 'id': UUID2, 'type': 'import', 'status': 'processing', 'input': {'loc': 'bake'}, 'result': {}, 'owner': TENANT2, 'message': '', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID2, 'schema': '/v2/schemas/task', 'image_id': 'fake_image_id', 'user_id': 'fake_user', 'request_id': 'fake_request_id', } response = webob.Response() self.serializer.get(response, self.fixtures[1]) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) 
        self.assertEqual('application/json', response.content_type)

    def test_create(self):
        response = webob.Response()
        self.serializer.create(response, self.fixtures[3])
        serialized_task = jsonutils.loads(response.body)
        self.assertEqual(http.CREATED, response.status_int)
        self.assertEqual(self.fixtures[3].task_id, serialized_task['id'])
        self.assertEqual(self.fixtures[3].task_input,
                         serialized_task['input'])
        self.assertIn('expires_at', serialized_task)
        self.assertEqual('application/json', response.content_type)

    def test_create_ensure_expires_at_is_not_returned(self):
        response = webob.Response()
        self.serializer.create(response, self.fixtures[0])
        serialized_task = jsonutils.loads(response.body)
        self.assertEqual(http.CREATED, response.status_int)
        self.assertEqual(self.fixtures[0].task_id, serialized_task['id'])
        self.assertEqual(self.fixtures[0].task_input,
                         serialized_task['input'])
        self.assertNotIn('expires_at', serialized_task)
        self.assertEqual('application/json', response.content_type)

        response = webob.Response()
        self.serializer.create(response, self.fixtures[1])
        serialized_task = jsonutils.loads(response.body)
        self.assertEqual(http.CREATED, response.status_int)
        self.assertEqual(self.fixtures[1].task_id, serialized_task['id'])
        self.assertEqual(self.fixtures[1].task_input,
                         serialized_task['input'])
        self.assertNotIn('expires_at', serialized_task)
        self.assertEqual('application/json', response.content_type)


# ---- glance-29.0.0/glance/tests/unit/v2/test_v2_policy.py ----

# Copyright 2021 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
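# Illustrative sketch (editorial addition, hedged): the wrapper classes under
# test adapt oslo.policy checks for the v2 controllers, translating
# exception.Forbidden into webob HTTP errors. A hypothetical call site looks
# like:
#
#     api_pol = policy.ImageAPIPolicy(req.context, image, enforcer=enforcer)
#     api_pol.modify_image()  # HTTPForbidden if the image is visible but
#                             # immutable; HTTPNotFound if get_image is
#                             # denied as well
#
# The real call sites live under glance/api/v2/ and may differ in detail.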
from unittest import mock import webob.exc from glance.api.v2 import policy from glance.common import exception from glance.tests import utils class APIPolicyBase(utils.BaseTestCase): def setUp(self): super(APIPolicyBase, self).setUp() self.enforcer = mock.MagicMock() self.context = mock.MagicMock() self.policy = policy.APIPolicyBase(self.context, enforcer=self.enforcer) def test_enforce(self): # Enforce passes self.policy._enforce('fake_rule') self.enforcer.enforce.assert_called_once_with( self.context, 'fake_rule', mock.ANY) # Make sure that Forbidden gets caught and translated self.enforcer.enforce.side_effect = exception.Forbidden self.assertRaises(webob.exc.HTTPForbidden, self.policy._enforce, 'fake_rule') # Any other exception comes straight through self.enforcer.enforce.side_effect = exception.ImageNotFound self.assertRaises(exception.ImageNotFound, self.policy._enforce, 'fake_rule') def test_check(self): # Check passes self.assertTrue(self.policy.check('_enforce', 'fake_rule')) # Check fails self.enforcer.enforce.side_effect = exception.Forbidden self.assertFalse(self.policy.check('_enforce', 'fake_rule')) def test_check_is_image_mutable(self): context = mock.MagicMock() image = mock.MagicMock() # Admin always wins context.is_admin = True context.owner = 'someuser' self.assertIsNone(policy.check_is_image_mutable(context, image)) # Image has no owner is never mutable by non-admins context.is_admin = False image.owner = None self.assertRaises(exception.Forbidden, policy.check_is_image_mutable, context, image) # Not owner is not mutable image.owner = 'someoneelse' self.assertRaises(exception.Forbidden, policy.check_is_image_mutable, context, image) # No project in context means not mutable image.owner = 'someoneelse' context.owner = None self.assertRaises(exception.Forbidden, policy.check_is_image_mutable, context, image) # Context matches image owner is mutable image.owner = 'someuser' context.owner = 'someuser' self.assertIsNone(policy.check_is_image_mutable(context, image)) class APIImagePolicy(APIPolicyBase): def setUp(self): super(APIImagePolicy, self).setUp() self.image = mock.MagicMock() self.policy = policy.ImageAPIPolicy(self.context, self.image, enforcer=self.enforcer) def test_enforce(self): self.assertRaises(webob.exc.HTTPNotFound, super(APIImagePolicy, self).test_enforce) @mock.patch('glance.api.policy._enforce_image_visibility') def test_enforce_visibility(self, mock_enf): # Visibility passes self.policy._enforce_visibility('something') mock_enf.assert_called_once_with(self.enforcer, self.context, 'something', mock.ANY) # Make sure that Forbidden gets caught and translated mock_enf.side_effect = exception.Forbidden self.assertRaises(webob.exc.HTTPForbidden, self.policy._enforce_visibility, 'something') # Any other exception comes straight through mock_enf.side_effect = exception.ImageNotFound self.assertRaises(exception.ImageNotFound, self.policy._enforce_visibility, 'something') def test_update_property(self): with mock.patch.object(self.policy, '_enforce') as mock_enf: self.policy.update_property('foo', None) mock_enf.assert_called_once_with('modify_image') with mock.patch.object(self.policy, '_enforce_visibility') as mock_enf: self.policy.update_property('visibility', 'foo') mock_enf.assert_called_once_with('foo') def test_update_locations(self): self.policy.update_locations() self.enforcer.enforce.assert_called_once_with(self.context, 'set_image_location', mock.ANY) def test_delete_locations(self): self.policy.delete_locations() 
self.enforcer.enforce.assert_called_once_with(self.context, 'delete_image_location', mock.ANY) def test_delete_locations_falls_back_to_legacy(self): self.config(enforce_new_defaults=False, group='oslo_policy') self.config(enforce_scope=False, group='oslo_policy') # As admin, image is mutable even if owner does not match self.context.is_admin = True self.context.owner = 'someuser' self.image.owner = 'someotheruser' self.policy.delete_locations() # As non-admin, owner matches, so we're good self.context.is_admin = False self.context.owner = 'someuser' self.image.owner = 'someuser' self.policy.delete_locations() # If owner does not match, we fail self.image.owner = 'someotheruser' self.assertRaises(exception.Forbidden, self.policy.delete_locations) # Make sure we are checking the legacy handler with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.delete_locations() m.assert_called_once_with(self.context, self.image) # Make sure we are not checking it if enforce_new_defaults=True and # enforce_scope=True self.config(enforce_new_defaults=True, group='oslo_policy') self.config(enforce_scope=True, group='oslo_policy') with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.delete_locations() self.assertFalse(m.called) def test_get_image_location(self): self.policy.get_image_location() self.enforcer.enforce.assert_called_once_with(self.context, 'get_image_location', mock.ANY) def test_enforce_exception_behavior(self): with mock.patch.object(self.policy.enforcer, 'enforce') as mock_enf: # First make sure we can update if allowed self.policy.update_property('foo', None) self.assertTrue(mock_enf.called) # Make sure that if modify_image and get_image both return # Forbidden then we should get NotFound. This is because # we are not allowed to delete the image, nor see that it # even exists. mock_enf.reset_mock() mock_enf.side_effect = exception.Forbidden self.assertRaises(webob.exc.HTTPNotFound, self.policy.update_property, 'foo', None) # Make sure we checked modify_image, and then get_image. mock_enf.assert_has_calls([ mock.call(mock.ANY, 'modify_image', mock.ANY), mock.call(mock.ANY, 'get_image', mock.ANY)]) # Make sure that if modify_image is disallowed, but # get_image is allowed, that we get Forbidden. This is # because we are allowed to see the image, but not modify # it, so 403 indicates that without confusing the user and # returning "not found" for an image they are able to GET. mock_enf.reset_mock() mock_enf.side_effect = [exception.Forbidden, lambda *a: None] self.assertRaises(webob.exc.HTTPForbidden, self.policy.update_property, 'foo', None) # Make sure we checked modify_image, and then get_image. 
mock_enf.assert_has_calls([ mock.call(mock.ANY, 'modify_image', mock.ANY), mock.call(mock.ANY, 'get_image', mock.ANY)]) def test_get_image(self): self.policy.get_image() self.enforcer.enforce.assert_called_once_with(self.context, 'get_image', mock.ANY) def test_get_images(self): self.policy.get_images() self.enforcer.enforce.assert_called_once_with(self.context, 'get_images', mock.ANY) def test_add_image(self): generic_target = {'project_id': self.context.project_id, 'owner': self.context.project_id, 'visibility': 'private'} self.policy = policy.ImageAPIPolicy(self.context, {}, enforcer=self.enforcer) self.policy.add_image() self.enforcer.enforce.assert_called_once_with(self.context, 'add_image', generic_target) def test_add_image_falls_back_to_legacy(self): self.config(enforce_new_defaults=False, group='oslo_policy') self.config(enforce_scope=False, group='oslo_policy') self.context.is_admin = False self.policy = policy.ImageAPIPolicy(self.context, {'owner': 'else'}, enforcer=self.enforcer) self.assertRaises(exception.Forbidden, self.policy.add_image) # Make sure we're calling the legacy handler if secure_rbac is False with mock.patch('glance.api.v2.policy.check_admin_or_same_owner') as m: self.policy.add_image() m.assert_called_once_with(self.context, {'project_id': 'else', 'owner': 'else', 'visibility': 'private'}) # Make sure we are not calling the legacy handler if # secure_rbac is being used. We won't fail the check because # our enforcer is a mock, just make sure we don't call that handler. self.config(enforce_new_defaults=True, group='oslo_policy') self.config(enforce_scope=True, group='oslo_policy') with mock.patch('glance.api.v2.policy.check_admin_or_same_owner') as m: self.policy.add_image() m.assert_not_called() def test_add_image_translates_owner_failure(self): self.policy = policy.ImageAPIPolicy(self.context, {'owner': 'else'}, enforcer=self.enforcer) # Make sure add_image works with no exception self.policy.add_image() # Make sure we don't get in the way of any other exceptions self.enforcer.enforce.side_effect = exception.Duplicate self.assertRaises(exception.Duplicate, self.policy.add_image) # If the exception is HTTPForbidden and the owner differs, # make sure we get the proper message translation self.enforcer.enforce.side_effect = webob.exc.HTTPForbidden('original') exc = self.assertRaises(webob.exc.HTTPForbidden, self.policy.add_image) self.assertIn('You are not permitted to create images owned by', str(exc)) # If the owner does not differ, make sure we get the original reason self.policy = policy.ImageAPIPolicy(self.context, {}, enforcer=self.enforcer) exc = self.assertRaises(webob.exc.HTTPForbidden, self.policy.add_image) self.assertIn('original', str(exc)) def test_delete_image(self): self.policy.delete_image() self.enforcer.enforce.assert_called_once_with(self.context, 'delete_image', mock.ANY) def test_delete_image_falls_back_to_legacy(self): self.config(enforce_new_defaults=False, group='oslo_policy') self.config(enforce_scope=False, group='oslo_policy') # As admin, image is mutable even if owner does not match self.context.is_admin = True self.context.owner = 'someuser' self.image.owner = 'someotheruser' self.policy.delete_image() # As non-admin, owner matches, so we're good self.context.is_admin = False self.context.owner = 'someuser' self.image.owner = 'someuser' self.policy.delete_image() # If owner does not match, we fail self.image.owner = 'someotheruser' self.assertRaises(exception.Forbidden, self.policy.delete_image) # Make sure we are checking the legacy 
handler with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.delete_image() m.assert_called_once_with(self.context, self.image) # Make sure we are not checking it if enforce_new_defaults=True and # enforce_scope=True self.config(enforce_new_defaults=True, group='oslo_policy') self.config(enforce_scope=True, group='oslo_policy') with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.delete_image() self.assertFalse(m.called) def test_upload_image(self): self.policy.upload_image() self.enforcer.enforce.assert_called_once_with(self.context, 'upload_image', mock.ANY) def test_upload_image_falls_back_to_legacy(self): self.config(enforce_new_defaults=False, group='oslo_policy') self.config(enforce_scope=False, group='oslo_policy') # As admin, image is mutable even if owner does not match self.context.is_admin = True self.context.owner = 'someuser' self.image.owner = 'someotheruser' self.policy.upload_image() # As non-admin, owner matches, so we're good self.context.is_admin = False self.context.owner = 'someuser' self.image.owner = 'someuser' self.policy.upload_image() # If owner does not match, we fail self.image.owner = 'someotheruser' self.assertRaises(exception.Forbidden, self.policy.upload_image) # Make sure we are checking the legacy handler with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.upload_image() m.assert_called_once_with(self.context, self.image) # Make sure we are not checking it if enforce_new_defaults=True and # enforce_scope=True self.config(enforce_new_defaults=True, group='oslo_policy') self.config(enforce_scope=True, group='oslo_policy') with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.upload_image() self.assertFalse(m.called) def test_download_image(self): self.policy.download_image() self.enforcer.enforce.assert_called_once_with(self.context, 'download_image', mock.ANY) def test_modify_image(self): self.policy.modify_image() self.enforcer.enforce.assert_called_once_with(self.context, 'modify_image', mock.ANY) def test_modify_image_falls_back_to_legacy(self): self.config(enforce_new_defaults=False, group='oslo_policy') self.config(enforce_scope=False, group='oslo_policy') # As admin, image is mutable even if owner does not match self.context.is_admin = True self.context.owner = 'someuser' self.image.owner = 'someotheruser' self.policy.modify_image() # As non-admin, owner matches, so we're good self.context.is_admin = False self.context.owner = 'someuser' self.image.owner = 'someuser' self.policy.modify_image() # If owner does not match, we fail self.image.owner = 'someotheruser' self.assertRaises(exception.Forbidden, self.policy.modify_image) # Make sure we are checking the legacy handler with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.modify_image() m.assert_called_once_with(self.context, self.image) # Make sure we are not checking it if enforce_new_defaults=True and # enforce_scope=True self.config(enforce_new_defaults=True, group='oslo_policy') self.config(enforce_scope=True, group='oslo_policy') with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m: self.policy.modify_image() self.assertFalse(m.called) def test_deactivate_image(self): self.policy.deactivate_image() self.enforcer.enforce.assert_called_once_with(self.context, 'deactivate', mock.ANY) def test_deactivate_image_falls_back_to_legacy(self): self.config(enforce_new_defaults=False, group='oslo_policy') self.config(enforce_scope=False, 
group='oslo_policy')
        # As admin, image is mutable even if owner does not match
        self.context.is_admin = True
        self.context.owner = 'someuser'
        self.image.owner = 'someotheruser'
        self.policy.deactivate_image()
        # As non-admin, owner matches, so we're good
        self.context.is_admin = False
        self.context.owner = 'someuser'
        self.image.owner = 'someuser'
        self.policy.deactivate_image()
        # If owner does not match, we fail
        self.image.owner = 'someotheruser'
        self.assertRaises(exception.Forbidden, self.policy.deactivate_image)
        # Make sure we are checking the legacy handler
        with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m:
            self.policy.deactivate_image()
            m.assert_called_once_with(self.context, self.image)
        # Make sure we are not checking it if enforce_new_defaults=True and
        # enforce_scope=True
        self.config(enforce_new_defaults=True, group='oslo_policy')
        self.config(enforce_scope=True, group='oslo_policy')
        with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m:
            self.policy.deactivate_image()
            self.assertFalse(m.called)

    def test_reactivate_image(self):
        self.policy.reactivate_image()
        self.enforcer.enforce.assert_called_once_with(self.context,
                                                      'reactivate',
                                                      mock.ANY)

    def test_reactivate_image_falls_back_to_legacy(self):
        self.config(enforce_new_defaults=False, group='oslo_policy')
        self.config(enforce_scope=False, group='oslo_policy')
        # As admin, image is mutable even if owner does not match
        self.context.is_admin = True
        self.context.owner = 'someuser'
        self.image.owner = 'someotheruser'
        self.policy.reactivate_image()
        # As non-admin, owner matches, so we're good
        self.context.is_admin = False
        self.context.owner = 'someuser'
        self.image.owner = 'someuser'
        self.policy.reactivate_image()
        # If owner does not match, we fail
        self.image.owner = 'someotheruser'
        self.assertRaises(exception.Forbidden, self.policy.reactivate_image)
        # Make sure we are checking the legacy handler
        with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m:
            self.policy.reactivate_image()
            m.assert_called_once_with(self.context, self.image)
        # Make sure we are not checking it if enforce_new_defaults=True and
        # enforce_scope=True
        self.config(enforce_new_defaults=True, group='oslo_policy')
        self.config(enforce_scope=True, group='oslo_policy')
        with mock.patch('glance.api.v2.policy.check_is_image_mutable') as m:
            self.policy.reactivate_image()
            self.assertFalse(m.called)

    def test_copy_image(self):
        self.policy.copy_image()
        self.enforcer.enforce.assert_called_once_with(self.context,
                                                      'copy_image',
                                                      mock.ANY)


class TestMetadefAPIPolicy(APIPolicyBase):
    def setUp(self):
        super(TestMetadefAPIPolicy, self).setUp()
        self.enforcer = mock.MagicMock()
        self.md_resource = mock.MagicMock()
        self.context = mock.MagicMock()
        self.policy = policy.MetadefAPIPolicy(self.context, self.md_resource,
                                              enforcer=self.enforcer)

    def test_enforce(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          super(TestMetadefAPIPolicy, self).test_enforce)

    def test_get_metadef_namespace(self):
        self.policy.get_metadef_namespace()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_namespace', mock.ANY)

    def test_get_metadef_namespaces(self):
        self.policy.get_metadef_namespaces()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_namespaces', mock.ANY)

    def test_add_metadef_namespace(self):
        self.policy.add_metadef_namespace()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'add_metadef_namespace', mock.ANY)

    def test_modify_metadef_namespace(self):
        self.policy.modify_metadef_namespace()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'modify_metadef_namespace', mock.ANY)

    def test_delete_metadef_namespace(self):
        self.policy.delete_metadef_namespace()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'delete_metadef_namespace', mock.ANY)

    def test_get_metadef_objects(self):
        self.policy.get_metadef_objects()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_objects', mock.ANY)

    def test_get_metadef_object(self):
        self.policy.get_metadef_object()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_object', mock.ANY)

    def test_add_metadef_object(self):
        self.policy.add_metadef_object()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'add_metadef_object', mock.ANY)

    def test_modify_metadef_object(self):
        self.policy.modify_metadef_object()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'modify_metadef_object', mock.ANY)

    def test_delete_metadef_object(self):
        self.policy.delete_metadef_object()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'delete_metadef_object', mock.ANY)

    def test_add_metadef_tag(self):
        self.policy.add_metadef_tag()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'add_metadef_tag', mock.ANY)

    def test_add_metadef_tags(self):
        self.policy.add_metadef_tags()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'add_metadef_tags', mock.ANY)

    def test_get_metadef_tags(self):
        self.policy.get_metadef_tags()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_tags', mock.ANY)

    def test_get_metadef_tag(self):
        self.policy.get_metadef_tag()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_tag', mock.ANY)

    def test_modify_metadef_tag(self):
        self.policy.modify_metadef_tag()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'modify_metadef_tag', mock.ANY)

    def test_delete_metadef_tags(self):
        self.policy.delete_metadef_tags()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'delete_metadef_tags', mock.ANY)

    def test_delete_metadef_tag(self):
        self.policy.delete_metadef_tag()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'delete_metadef_tag', mock.ANY)

    def test_add_metadef_property(self):
        self.policy.add_metadef_property()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'add_metadef_property', mock.ANY)

    def test_get_metadef_properties(self):
        self.policy.get_metadef_properties()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_properties', mock.ANY)

    def test_get_metadef_property(self):
        self.policy.get_metadef_property()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'get_metadef_property', mock.ANY)

    def test_modify_metadef_property(self):
        self.policy.modify_metadef_property()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'modify_metadef_property', mock.ANY)

    def test_remove_metadef_property(self):
        self.policy.remove_metadef_property()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'remove_metadef_property', mock.ANY)

    def test_add_metadef_resource_type_association(self):
        self.policy.add_metadef_resource_type_association()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'add_metadef_resource_type_association', mock.ANY)

    def test_list_metadef_resource_types(self):
        self.policy.list_metadef_resource_types()
        self.enforcer.enforce.assert_called_once_with(
            self.context, 'list_metadef_resource_types', mock.ANY)

    def test_enforce_exception_behavior(self):
        with mock.patch.object(self.policy.enforcer, 'enforce') as mock_enf:
            # First make sure we can update if
allowed self.policy.modify_metadef_namespace() self.assertTrue(mock_enf.called) # Make sure that if modify_metadef_namespace and # get_metadef_namespace both return Forbidden then we # should get NotFound. This is because we are not allowed # to modify the namespace, nor see that it even exists. mock_enf.reset_mock() mock_enf.side_effect = exception.Forbidden self.assertRaises(webob.exc.HTTPNotFound, self.policy.modify_metadef_namespace) # Make sure we checked modify_metadef_namespace, and then # get_metadef_namespace. mock_enf.assert_has_calls([ mock.call(mock.ANY, 'modify_metadef_namespace', mock.ANY), mock.call(mock.ANY, 'get_metadef_namespace', mock.ANY)]) # Make sure that if modify_metadef_namespace is disallowed, but # get_metadef_namespace is allowed, that we get Forbidden. This is # because we are allowed to see the namespace, but not modify # it, so 403 indicates that without confusing the user and # returning "not found" for a namespace they are able to GET. mock_enf.reset_mock() mock_enf.side_effect = [exception.Forbidden, lambda *a: None] self.assertRaises(webob.exc.HTTPForbidden, self.policy.modify_metadef_namespace) # Make sure we checked modify_metadef_namespace, and then # get_metadef_namespace. mock_enf.assert_has_calls([ mock.call(mock.ANY, 'modify_metadef_namespace', mock.ANY), mock.call(mock.ANY, 'get_metadef_namespace', mock.ANY)]) def test_get_metadef_resource_type(self): self.policy.get_metadef_resource_type() self.enforcer.enforce.assert_called_once_with( self.context, 'get_metadef_resource_type', mock.ANY) def test_remove_metadef_resource_type_association(self): self.policy.remove_metadef_resource_type_association() self.enforcer.enforce.assert_called_once_with( self.context, 'remove_metadef_resource_type_association', mock.ANY) class TestMemberAPIPolicy(utils.BaseTestCase): def setUp(self): super(TestMemberAPIPolicy, self).setUp() self.enforcer = mock.MagicMock() self.image = mock.MagicMock() self.context = mock.MagicMock() self.policy = policy.MemberAPIPolicy(self.context, self.image, enforcer=self.enforcer) def test_enforce(self): # Enforce passes self.policy._enforce('fake_rule') expected_calls = [ mock.call(self.context, 'get_image', mock.ANY), mock.call(self.context, 'fake_rule', mock.ANY) ] self.enforcer.enforce.assert_has_calls(expected_calls) def test_get_member(self): self.policy.get_member() expected_calls = [ mock.call(self.context, 'get_image', mock.ANY), mock.call(self.context, 'get_member', mock.ANY) ] self.enforcer.enforce.assert_has_calls(expected_calls) def test_get_members(self): self.policy.get_members() expected_calls = [ mock.call(self.context, 'get_image', mock.ANY), mock.call(self.context, 'get_members', mock.ANY) ] self.enforcer.enforce.assert_has_calls(expected_calls) def test_add_member(self): self.policy.add_member() expected_calls = [ mock.call(self.context, 'get_image', mock.ANY), mock.call(self.context, 'add_member', mock.ANY) ] self.enforcer.enforce.assert_has_calls(expected_calls) def test_modify_member(self): self.policy.modify_member() expected_calls = [ mock.call(self.context, 'get_image', mock.ANY), mock.call(self.context, 'modify_member', mock.ANY) ] self.enforcer.enforce.assert_has_calls(expected_calls) def test_delete_member(self): self.policy.delete_member() expected_calls = [ mock.call(self.context, 'get_image', mock.ANY), mock.call(self.context, 'delete_member', mock.ANY) ] self.enforcer.enforce.assert_has_calls(expected_calls) def test_enforce_exception_behavior(self): with mock.patch.object(self.policy.enforcer, 
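# A minimal, hypothetical sketch (not from the Glance tree) of the pattern
# the two test_enforce_exception_behavior tests above exercise: when both
# the target rule and the related "get" rule are denied, the caller sees
# 404 (hiding the resource's existence); when only the target rule is
# denied, the caller sees 403. ``enforce`` stands in for oslo.policy's
# Enforcer.enforce(context, rule, target); all names are illustrative.


class Forbidden(Exception):
    pass


class NotFound(Exception):
    pass


def enforce_with_fallback(enforce, context, target_rule, get_rule, target):
    """Apply target_rule, translating a denial into 404 or 403."""
    try:
        enforce(context, target_rule, target)
    except Forbidden:
        try:
            # Can the caller even see the resource?
            enforce(context, get_rule, target)
        except Forbidden:
            # Not visible at all: hide its very existence
            raise NotFound()
        # Visible but not mutable: an honest 403
        raise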
class TestTasksAPIPolicy(APIPolicyBase): def setUp(self): super(TestTasksAPIPolicy, self).setUp() self.enforcer = mock.MagicMock() self.context = mock.MagicMock() self.policy = policy.TasksAPIPolicy(self.context, enforcer=self.enforcer) def test_tasks_api_access(self): self.policy.tasks_api_access() self.enforcer.enforce.assert_called_once_with(self.context, 'tasks_api_access', mock.ANY) class TestCacheImageAPIPolicy(utils.BaseTestCase): def setUp(self): super(TestCacheImageAPIPolicy, self).setUp() self.enforcer = mock.MagicMock() self.context = mock.MagicMock() def test_manage_image_cache(self): self.policy = policy.CacheImageAPIPolicy( self.context, enforcer=self.enforcer, policy_str='manage_image_cache') self.policy.manage_image_cache() self.enforcer.enforce.assert_called_once_with(self.context, 'manage_image_cache', mock.ANY) def test_manage_image_cache_with_cache_delete(self): self.policy = policy.CacheImageAPIPolicy( self.context, enforcer=self.enforcer, policy_str='cache_delete') self.policy.manage_image_cache() self.enforcer.enforce.assert_called_once_with(self.context, 'cache_delete', mock.ANY) def test_manage_image_cache_with_cache_list(self): self.policy = policy.CacheImageAPIPolicy( self.context, enforcer=self.enforcer, policy_str='cache_list') self.policy.manage_image_cache() self.enforcer.enforce.assert_called_once_with(self.context, 'cache_list', mock.ANY) def test_manage_image_cache_with_cache_image(self): self.policy = policy.CacheImageAPIPolicy( self.context, enforcer=self.enforcer, policy_str='cache_image') self.policy.manage_image_cache() self.enforcer.enforce.assert_called_once_with(self.context, 'cache_image', mock.ANY) class TestDiscoveryAPIPolicy(APIPolicyBase): def setUp(self): super(TestDiscoveryAPIPolicy, self).setUp() self.enforcer = mock.MagicMock() self.context = mock.MagicMock() self.policy = policy.DiscoveryAPIPolicy( self.context, enforcer=self.enforcer) def test_stores_info_detail(self): self.policy.stores_info_detail() self.enforcer.enforce.assert_called_once_with(self.context, 'stores_info_detail', mock.ANY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/utils.py0000664000175000017500000006200700000000000017170 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" import errno import functools import http.client import http.server import io import os import shlex import shutil import signal import socket import subprocess import threading import time from unittest import mock from alembic import command as alembic_command import fixtures from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as cfg_fixture from oslo_log.fixture import logging_error as log_fixture from oslo_log import log from oslo_utils import timeutils from oslo_utils import units import testtools import webob from glance.api.v2 import cached_images from glance.common import config from glance.common import exception from glance.common import property_utils from glance.common import utils from glance.common import wsgi from glance import context from glance.db.sqlalchemy import alembic_migrations from glance.db.sqlalchemy import api as db_api from glance.tests.unit import fixtures as glance_fixtures CONF = cfg.CONF LOG = log.getLogger(__name__) try: CONF.debug except cfg.NoSuchOptError: # NOTE(sigmavirus24): If we run the entire test suite, the logging options # will be registered appropriately and we do not need to re-register them. # However, when we run a test in isolation (or use --debug), those options # will not be registered for us. In order for a test in a class that # inherits from BaseTestCase to even run, we will need to register them # ourselves. BaseTestCase.config will set the debug level if something # calls self.config(debug=True) so we need these options registered # appropriately. # See bug 1433785 for more details. 
log.register_options(CONF) class BaseTestCase(testtools.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self._config_fixture = self.useFixture(cfg_fixture.Config()) # NOTE(bcwaldon): parse_args has to be called to register certain # command-line options - specifically we need config_dir for # the following policy tests config.parse_args(args=[]) self.addCleanup(CONF.reset) self.mock_object(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True) self.test_dir = self.useFixture(fixtures.TempDir()).path self.test_dir2 = self.useFixture(fixtures.TempDir()).path self.conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(self.conf_dir) self.lock_dir = os.path.join(self.test_dir, 'locks') utils.safe_mkdirs(self.lock_dir) lockutils.set_defaults(self.lock_dir) self.set_policy() # Limit the amount of DeprecationWarning messages in the unit test logs self.useFixture(glance_fixtures.WarningsFixture()) # Make sure logging output is limited but still test debug formatting self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(glance_fixtures.StandardLogging()) if cached_images.WORKER: cached_images.WORKER.terminate() cached_images.WORKER = None def set_policy(self): conf_file = "policy.yaml" self.policy_file = self._copy_data_file(conf_file, self.conf_dir) self.config(policy_file=self.policy_file, group='oslo_policy') def set_property_protections(self, use_policies=False): self.unset_property_protections() conf_file = "property-protections.conf" if use_policies: conf_file = "property-protections-policies.conf" self.config(property_protection_rule_format="policies") self.property_file = self._copy_data_file(conf_file, self.test_dir) self.config(property_protection_file=self.property_file) def unset_property_protections(self): for section in property_utils.CONFIG.sections(): property_utils.CONFIG.remove_section(section) def _copy_data_file(self, file_name, dst_dir): src_file_name = os.path.join('glance/tests/etc', file_name) shutil.copy(src_file_name, dst_dir) dst_file_name = os.path.join(dst_dir, file_name) return dst_file_name def set_property_protection_rules(self, rules): with open(self.property_file, 'w') as f: for rule_key in rules.keys(): f.write('[%s]\n' % rule_key) for operation in rules[rule_key].keys(): roles_str = ','.join(rules[rule_key][operation]) f.write('%s = %s\n' % (operation, roles_str)) def config(self, **kw): """ Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. """ self._config_fixture.config(**kw) def mock_object(self, obj, attr_name, *args, **kwargs): """Use python mock to mock an object attribute Mocks the specified object's attribute with the given value. Automatically performs 'addCleanup' for the mock. """ patcher = mock.patch.object(obj, attr_name, *args, **kwargs) result = patcher.start() self.addCleanup(patcher.stop) return result def delay_inaccurate_clock(self, duration=0.001): """Add a small delay to compensate for inaccurate system clocks. Some tests make assertions based on timestamps (e.g. comparing 'created_at' and 'updated_at' fields). In some cases, subsequent time.time() calls may return identical values (python timestamps can have a lower resolution on Windows compared to Linux - 1e-7 as opposed to 1e-9). A small delay (a few ms should be negligible) can prevent such issues. At the same time, it spares us from mocking the time module, which might be undesired. """ # For now, we'll do this only for Windows. If really needed, # on Py3 we can get the clock resolution using time.get_clock_info, # but at that point we may as well just sleep 1ms all the time. if os.name == 'nt': time.sleep(duration)
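# A short, hypothetical usage sketch (not part of the Glance tree) showing
# how a test built on BaseTestCase above combines config() overrides with
# mock_object(); both are undone automatically when the test finishes. The
# patched attribute (shutil.disk_usage) is arbitrary, for illustration only.


class _ExampleUsageTest(BaseTestCase):

    def test_overrides_are_temporary(self):
        # Reverted by the Config fixture when the test ends
        self.config(debug=True)
        # The patch is removed via the addCleanup() inside mock_object()
        fake = self.mock_object(shutil, 'disk_usage',
                                return_value=(1, 2, 3))
        self.assertEqual((1, 2, 3), shutil.disk_usage('/tmp'))
        fake.assert_called_once_with('/tmp')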
class requires(object): """Decorator that initiates additional test setup/teardown.""" def __init__(self, setup=None, teardown=None): self.setup = setup self.teardown = teardown def __call__(self, func): def _runner(*args, **kw): if self.setup: self.setup(args[0]) func(*args, **kw) if self.teardown: self.teardown(args[0]) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner class depends_on_exe(object): """Decorator to skip test if an executable is unavailable""" def __init__(self, exe): self.exe = exe def __call__(self, func): def _runner(*args, **kw): if os.name != 'nt': cmd = 'which %s' % self.exe else: cmd = 'where.exe', '%s' % self.exe exitcode, out, err = execute(cmd, raise_error=False) if exitcode != 0: args[0].disabled_message = 'test requires exe: %s' % self.exe args[0].disabled = True func(*args, **kw) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner def skip_if_disabled(func): """Decorator that skips a test if test case is disabled.""" @functools.wraps(func) def wrapped(*a, **kwargs): func.__test__ = False test_obj = a[0] message = getattr(test_obj, 'disabled_message', 'Test disabled') if getattr(test_obj, 'disabled', False): test_obj.skipTest(message) func(*a, **kwargs) return wrapped def fork_exec(cmd, exec_env=None, logfile=None, pass_fds=None): """ Execute a command using fork/exec. This is needed for executing programs that need path searching but cannot have a shell as their parent process, for example: glance-api. When glance-api starts it sets itself as the parent process for its own process group. Thus the pid that a Popen process would have is not the right pid to use for killing the process group. This patch gives the test env direct access to the actual pid. :param cmd: Command to execute, as a command-line string (it is split with shlex). :param exec_env: A dictionary representing the environment with which to run the command. :param logfile: A path to a file which will hold the stdout/err of the child process. :param pass_fds: Sequence of file descriptors passed to the child. """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val pid = os.fork() if pid == 0: if logfile: fds = [1, 2] with open(logfile, 'r+b') as fptr: for desc in fds: # redirect stdout/stderr to the logfile try: os.dup2(fptr.fileno(), desc) except OSError: pass if pass_fds: for fd in pass_fds: os.set_inheritable(fd, True) args = shlex.split(cmd) os.execvpe(args[0], args, env) else: return pid def wait_for_fork(pid, raise_error=True, expected_exitcode=0, force=True): """ Wait for a process to complete This function will wait for the given pid to complete. If the exit code does not match expected_exitcode, an error is raised. """ # For the first period, we wait without being pushy, but after # this timer expires, we start sending SIGTERM term_timer = timeutils.StopWatch(5) term_timer.start() # After this timer expires we start sending SIGKILL nice_timer = timeutils.StopWatch(7) nice_timer.start() # Process gets a maximum amount of time to exit before we fail the # test total_timer = timeutils.StopWatch(10) total_timer.start() while not total_timer.expired(): try: cpid, rc = os.waitpid(pid, force and os.WNOHANG or 0) if cpid == 0 and force: if not term_timer.expired(): # Waiting for exit on first signal pass elif not nice_timer.expired(): # Politely ask the process to GTFO LOG.warning('Killing child %i with SIGTERM', pid) os.kill(pid, signal.SIGTERM) else: # No more Mr. Nice Guy LOG.warning('Killing child %i with SIGKILL', pid) os.kill(pid, signal.SIGKILL) expected_exitcode = signal.SIGKILL time.sleep(1) continue LOG.info('waitpid(%i) returned %i,%i', pid, cpid, rc) if rc != expected_exitcode: raise RuntimeError('The exit code %d is not %d' % (rc, expected_exitcode)) return rc except ChildProcessError: # Nothing to wait for return 0 except Exception as e: LOG.error('Got wait error: %s', e) if raise_error: raise raise RuntimeError('Gave up waiting for %i to exit!' % pid) def execute(cmd, raise_error=True, no_venv=False, exec_env=None, expect_exit=True, expected_exitcode=0, context=None): """ Executes a command in a subprocess. Returns a tuple of (exitcode, out, err), where out is the string output from stdout and err is the string output from stderr when executing the command. :param cmd: Command string to execute :param raise_error: If returncode is not 0 (success), then raise a RuntimeError (default: True) :param no_venv: Disable the virtual environment :param exec_env: Optional dictionary of additional environment variables; values may be callables, which will be passed the current value of the named environment variable :param expect_exit: Optional flag true iff timely exit is expected :param expected_exitcode: expected exitcode from the launcher :param context: additional context for error message """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val # If we're asked to omit the virtualenv, and if one is set up, # restore the various environment variables if no_venv and 'VIRTUAL_ENV' in env: # Clip off the first element of PATH env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1] del env['VIRTUAL_ENV'] # Make sure that we use the programs in the # current source directory's bin/ directory. path_ext = [os.path.join(os.getcwd(), 'bin')] # Also jack in the path cmd comes from, if it's absolute if os.name != 'nt': args = shlex.split(cmd) else: args = cmd executable = args[0] if os.path.isabs(executable): path_ext.append(os.path.dirname(executable)) env['PATH'] = ':'.join(path_ext) + ':' + env['PATH'] process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) if expect_exit: result = process.communicate() (out, err) = result exitcode = process.returncode else: out = '' err = '' exitcode = 0 if exitcode != expected_exitcode and raise_error: msg = ("Command %(cmd)s did not succeed. Returned an exit " "code of %(exitcode)d." "\n\nSTDOUT: %(out)s" "\n\nSTDERR: %(err)s" % {'cmd': cmd, 'exitcode': exitcode, 'out': out, 'err': err}) if context: msg += "\n\nCONTEXT: %s" % context raise RuntimeError(msg) return exitcode, out, err def find_executable(cmdname): """ Searches the path for a given cmdname. Returns an absolute filename if an executable with the given name exists in the path, or None if one does not. :param cmdname: The bare name of the executable to search for """ # Keep an eye out for the possibility of an absolute pathname if os.path.isabs(cmdname): return cmdname # Get a list of the directories to search path = ([os.path.join(os.getcwd(), 'bin')] + os.environ['PATH'].split(os.pathsep)) # Search through each in turn for elem in path: full_path = os.path.join(elem, cmdname) if os.access(full_path, os.X_OK): return full_path # No dice... return None def get_unused_port(): """ Returns an unused port on localhost. """ port, s = get_unused_port_and_socket() s.close() return port def get_unused_port_and_socket(): """ Returns an unused port on localhost and the open socket from which it was created. """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('localhost', 0)) addr, port = s.getsockname() return (port, s) def get_unused_port_ipv6(): """ Returns an unused port on localhost on IPv6 (uses ::1). """ port, s = get_unused_port_and_socket_ipv6() s.close() return port def get_unused_port_and_socket_ipv6(): """ Returns an unused port on localhost and the open socket from which it was created, but uses IPv6 (::1). """ s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) s.bind(('::1', 0)) # Ignoring flowinfo and scopeid... addr, port, flowinfo, scopeid = s.getsockname() return (port, s) def xattr_writes_supported(path): """ Returns True if we can write a file to the supplied path and subsequently write a xattr to that file. """ try: import xattr except ImportError: return False def set_xattr(path, key, value): xattr.setxattr(path, "user.%s" % key, value) # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs fake_filepath = os.path.join(path, 'testing-checkme') result = True with open(fake_filepath, 'wb') as fake_file: fake_file.write(b"XXX") fake_file.flush() try: set_xattr(fake_filepath, 'hits', b'1') except IOError as e: if e.errno == errno.EOPNOTSUPP: result = False else: # Cleanup after ourselves... if os.path.exists(fake_filepath): os.unlink(fake_filepath) return result def minimal_headers(name, public=True): headers = { 'Content-Type': 'application/octet-stream', 'X-Image-Meta-Name': name, 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', } if public: headers['X-Image-Meta-Is-Public'] = 'True' return headers def minimal_add_command(port, name, suffix='', public=True): visibility = 'is_public=True' if public else '' return ("bin/glance --port=%d add %s" " disk_format=raw container_format=ovf" " name=%s %s" % (port, visibility, name, suffix)) def start_http_server(image_id, image_data): def _get_http_handler_class(fixture): class StaticHTTPRequestHandler(http.server.BaseHTTPRequestHandler): def do_GET(self): self.send_response(http.client.OK) self.send_header('Content-Length', str(len(fixture))) self.end_headers() self.wfile.write(fixture.encode('latin-1')) return def do_HEAD(self): # reserve non_existing_image_path for the cases where we expect # 404 from the server if 'non_existing_image_path' in self.path: self.send_response(http.client.NOT_FOUND) else: self.send_response(http.client.OK) self.send_header('Content-Length', str(len(fixture))) self.end_headers() return def log_message(self, *args, **kwargs): # Override this method to prevent debug output from going # to stderr during testing return return StaticHTTPRequestHandler server_address = ('127.0.0.1', 0) handler_class = _get_http_handler_class(image_data) httpd = http.server.HTTPServer(server_address, handler_class) port = httpd.socket.getsockname()[1] thread = threading.Thread(target=httpd.serve_forever) thread.daemon = True thread.start() return thread, httpd, port class FakeAuthMiddleware(wsgi.Middleware): def __init__(self, app, is_admin=False): super(FakeAuthMiddleware, self).__init__(app) self.is_admin = is_admin def process_request(self, req): auth_token = req.headers.get('X-Auth-Token') user = None tenant = None roles = [] if auth_token: user, tenant, role = auth_token.split(':') if tenant.lower() == 'none': tenant = None roles = [role] req.headers['X-User-Id'] = user req.headers['X-Tenant-Id'] = tenant req.headers['X-Roles'] = role req.headers['X-Identity-Status'] = 'Confirmed' kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': self.is_admin, 'auth_token': auth_token, } req.context = context.RequestContext(**kwargs) class FakeHTTPResponse(object): def __init__(self, status=http.client.OK, headers=None, data=None, *args, **kwargs): data = data or b'I am a teapot, short and stout\n' self.data = io.BytesIO(data) self.read = self.data.read self.status = status self.headers = headers or {'content-length': len(data)} def getheader(self, name, default=None): return self.headers.get(name.lower(), default) def getheaders(self): return self.headers or {} def read(self, amt): return self.data.read(amt) class Httplib2WsgiAdapter(object): def __init__(self, app): self.app = app def request(self, uri, method="GET", body=None, headers=None): req = webob.Request.blank(uri, method=method, headers=headers) if isinstance(body, str): req.body = body.encode('utf-8') else: req.body = body resp = req.get_response(self.app) return Httplib2WebobResponse(resp), resp.body.decode('utf-8') class Httplib2WebobResponse(object): def __init__(self, webob_resp): self.webob_resp = webob_resp @property def status(self): return self.webob_resp.status_code def __getitem__(self, key): return self.webob_resp.headers[key] def get(self, key): return self.webob_resp.headers[key] @property def allow(self): return self.webob_resp.allow @allow.setter def allow(self, allowed): if type(allowed) is not str: raise TypeError('Allow header should be a str') self.webob_resp.allow = allowed
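# A hypothetical usage sketch (not part of this module): driving a WSGI app
# in-process through Httplib2WsgiAdapter above, httplib2-style. The tiny app
# below is a stand-in for a real Glance API pipeline; the URL is illustrative.


def _example_wsgi_adapter_usage():
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    client = Httplib2WsgiAdapter(app)
    resp, body = client.request('http://localhost/v2/images', 'GET', body='')
    # resp.status == 200 and body == 'hello'
    return resp.status, body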
class HttplibWsgiAdapter(object): def __init__(self, app): self.app = app self.req = None def request(self, method, url, body=None, headers=None): if headers is None: headers = {} self.req = webob.Request.blank(url, method=method, headers=headers) self.req.body = body def getresponse(self): response = self.req.get_response(self.app) return FakeHTTPResponse(response.status_code, response.headers, response.body) def db_sync(version='heads', engine=None): """Migrate the database to `version` or the most recent version.""" if engine is None: engine = db_api.get_engine() alembic_config = alembic_migrations.get_alembic_config(engine=engine) alembic_command.upgrade(alembic_config, version) def start_standalone_http_server(): def _get_http_handler_class(): class StaticHTTPRequestHandler(http.server.BaseHTTPRequestHandler): def do_GET(self): data = b"Hello World!!!" self.send_response(http.client.OK) self.send_header('Content-Length', str(len(data))) self.end_headers() self.wfile.write(data) return return StaticHTTPRequestHandler server_address = ('127.0.0.1', 0) handler_class = _get_http_handler_class() httpd = http.server.HTTPServer(server_address, handler_class) port = httpd.socket.getsockname()[1] thread = threading.Thread(target=httpd.serve_forever) thread.daemon = True thread.start() return thread, httpd, port class FakeData(object): """Generate a bunch of data without storing it in memory. This acts like a read-only file object which generates fake data in chunks when read() is called or it is used as a generator. It can generate an arbitrary amount of data without storing it in memory.
:param length: The number of bytes to generate :param chunk_size: The chunk size to return in iteration mode, or when read() is called unbounded """ def __init__(self, length, chunk_size=64 * units.Ki): self._max = length self._chunk_size = chunk_size self._len = 0 def read(self, length=None): if length is None: length = self._chunk_size length = min(length, self._max - self._len) self._len += length if length == 0: return b'' else: return b'0' * length def __iter__(self): return self def __next__(self): r = self.read() if len(r) == 0: raise StopIteration() else: return r ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9183102 glance-29.0.0/glance/tests/var/0000775000175000017500000000000000000000000016241 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/ca.crt0000664000175000017500000000240500000000000017337 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDiTCCAnGgAwIBAgIJAMj+Lfpqc9lLMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMRIwEAYDVQQKDAlPcGVuU3RhY2sx DzANBgNVBAsMBkdsYW5jZTESMBAGA1UEAwwJR2xhbmNlIENBMB4XDTE1MDEzMTA1 MzAyNloXDTI1MDEyODA1MzAyNlowWzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNv bWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xhbmNlMRIw EAYDVQQDDAlHbGFuY2UgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB AQDcW4cRtw96/ZYsx3UB1jWWT0pAlsMQ03En7dueh9o4UZYChY2NMqTJ3gVqy1vf 4wyRU1ROb/N5L4KdQiJARH/ARbV+qrWoRvkcWBfg9w/4uZ9ZFhCBbaa2cAtTIGzV ta6HP9UPeyfXrS+jgjqU2QN3bcc0ZCMAiQbtW7Vpw8RNr0NvTJDaSCzmpGQ7TQtB 0jXm1nSG7FZUbojUCYB6TBGd01Cg8GzAai3ngXDq6foVJEwfmaV2Zapb0A4FLquX OzebskY5EL/okQGPofSRCu/ar+HV4HN3+PgIIrfa8RhDDdlv6qE1iEuS6isSH1s+ 7BA2ZKfzT5t8G/8lSjKa/r2pAgMBAAGjUDBOMB0GA1UdDgQWBBT3M/WuigtS7JYZ QD0XJEDD8JSZrTAfBgNVHSMEGDAWgBT3M/WuigtS7JYZQD0XJEDD8JSZrTAMBgNV HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWOhC9kBZAJalQhAeNGIiiJ2bV HpvzSCEXSEAdh3A0XDK1KxoMHy1LhNGYrMmN2a+2O3SoX0FLB4p9zOifq4ACwaMD CjQeB/whsfPt5s0gV3mGMCR+V2b8r5H/30KRbIzQGXmy+/r6Wfe012jcVVXsQawW Omd4d+Bduf5iiL1OCKEMepqjQLu7Yg41ucRpUewBA+A9hoKp7jpwSnzSALX7FWEQ TBJtJ9jEnZl36S81eZJvOXSzeptHyomSAt8eGFCVuPB0dZCXuBNLu4Gsn+dIhfyj NwK4noYZXMndPwGy92KDhjxVnHzd9HwImgr6atmWhPPz5hm50BrA7sv06Nto -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/ca.key0000664000175000017500000000325000000000000017336 0ustar00zuulzuul00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDcW4cRtw96/ZYs x3UB1jWWT0pAlsMQ03En7dueh9o4UZYChY2NMqTJ3gVqy1vf4wyRU1ROb/N5L4Kd QiJARH/ARbV+qrWoRvkcWBfg9w/4uZ9ZFhCBbaa2cAtTIGzVta6HP9UPeyfXrS+j gjqU2QN3bcc0ZCMAiQbtW7Vpw8RNr0NvTJDaSCzmpGQ7TQtB0jXm1nSG7FZUbojU CYB6TBGd01Cg8GzAai3ngXDq6foVJEwfmaV2Zapb0A4FLquXOzebskY5EL/okQGP ofSRCu/ar+HV4HN3+PgIIrfa8RhDDdlv6qE1iEuS6isSH1s+7BA2ZKfzT5t8G/8l SjKa/r2pAgMBAAECggEABeoS+v+906BAypzj4BO+xnUEWi1xuN7j951juqKM0dwm uZSaEwMb9ysVXCNvKNgwOypQZfaNQ2BqEgx3XOA5yZBVabvtOkIFZ6RZp7kZ3aQl yb9U3BR0WAsz0pxZL3c74vdsoYi9rgVA9ROGvP4CIM96fEZ/xgDnhbFjch5GA4u2 8XQ/kJUwLl0Uzxyo10sqGu3hgMwpM8lpaRW6d5EQ628rJEtA/Wmy5GpyCUhTD/5B jE1IzhjT4T5LqiPjA/Dsmz4Sa0+MyKRmA+zfSH6uS4szSaj53GVMHh4K+Xg2/EeD 6I3hGOtzZuYp5HBHE6J8VgeuErBQf32CCglHqN/dLQKBgQD4XaXa+AZtB10cRUV4 LZDB1AePJLloBhKikeTboZyhZEwbNuvw3JSQBAfUdpx3+8Na3Po1Tfy3DlZaVCU2 0PWh2UYrtwA3dymp8GCuSvnsLz1kNGv0Q7WEYaepyKRO8qHCjrTDUFuGVztU+H6O OWPHRd4DnyF3pKN7K4j6pU76HwKBgQDjIXylwPb6TD9ln13ijJ06t9l1E13dSS0B 
+9QU3f4abjMmW0K7icrNdmsjHafWLGXP2dxB0k4sx448buH+L8uLjC8G80wLQMSJ NAKpxIsmkOMpPUl80ks8bmzsqztmtql6kAgSwSW84vftJyNrFnp2kC2O4ZYGwz1+ 8rj3nBrfNwKBgQDrCJxCyoIyPUy0yy0BnIUnmAILSSKXuV97LvtXiOnTpTmMa339 8pA4dUf/nLtXpA3r98BkH0gu50d6tbR92mMI5bdM+SIgWwk3g33KkrNN+iproFwk zMqC23Mx7ejnuR6xIiEXz/y89eH0+C+zYcX1tz1xSe7+7PO0RK+dGkDR2wKBgHGR L+MtPhDfCSAF9IqvpnpSrR+2BEv+J8wDIAMjEMgka9z06sQc3NOpL17KmD4lyu6H z3L19fK8ASnEg6l2On9XI7iE9HP3+Y1k/SPny3AIKB1ZsKICAG6CBGK+J6BvGwTW ecLu4rC0iCUDWdlUzvzzkGQN9dcBzoDoWoYsft83AoGAAh4MyrM32gwlUgQD8/jX 8rsJlKnme0qMjX4A66caBomjztsH2Qt6cH7DIHx+hU75pnDAuEmR9xqnX7wFTR9Y 0j/XqTVsTjDINRLgMkrg7wIqKtWdicibBx1ER9LzwfNwht/ZFeMLdeUUUYMNv3cg cMSLxlxgFaUggYj/dsF6ypQ= -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/certificate.crt0000664000175000017500000001261600000000000021243 0ustar00zuulzuul00000000000000# > openssl x509 -in glance/tests/var/certificate.crt -noout -text # Certificate: # Data: # Version: 1 (0x0) # Serial Number: 1 (0x1) # Signature Algorithm: sha1WithRSAEncryption # Issuer: C=AU, ST=Some-State, O=OpenStack, OU=Glance, CN=Glance CA # Validity # Not Before: Feb 2 20:22:13 2015 GMT # Not After : Jan 31 20:22:13 2024 GMT # Subject: C=AU, ST=Some-State, O=OpenStack, OU=Glance, CN=127.0.0.1 # Subject Public Key Info: # Public Key Algorithm: rsaEncryption # RSA Public Key: (4096 bit) # Modulus (4096 bit): # 00:9f:44:13:51:de:e9:5a:f7:ac:33:2a:1a:4c:91: # a1:73:bc:f3:a6:d3:e6:59:ae:e8:e2:34:68:3e:f4: # 40:c1:a1:1a:65:9a:a3:67:e9:2c:b9:79:9c:00:b1: # 7c:c1:e6:9e:de:47:bf:f1:cb:f2:73:d4:c3:62:fe: # 82:90:6f:b4:75:ca:7e:56:8f:99:3d:06:51:3c:40: # f4:ff:74:97:4f:0d:d2:e6:66:76:8d:97:bf:89:ce: # fe:b2:d7:89:71:f2:a0:d9:f5:26:7c:1a:7a:bf:2b: # 8f:72:80:e7:1f:4d:4a:40:a3:b9:9e:33:f6:55:e0: # 40:2b:1e:49:e4:8c:71:9d:11:32:cf:21:41:e1:13: # 28:c6:d6:f6:e0:b3:26:10:6d:5b:63:1d:c3:ee:d0: # c4:66:63:38:89:6b:8f:2a:c2:bd:4f:e4:bc:03:8f: # a2:f2:5c:1d:73:11:9c:7b:93:3d:d6:a3:d1:2d:cd: # 64:23:24:bc:65:3c:71:20:28:60:a0:ea:fe:77:0e: # 1d:95:36:76:ad:e7:2f:1c:27:62:55:e3:9d:11:c1: # fb:43:3e:e5:21:ac:fd:0e:7e:3d:c9:44:d2:bd:6f: # 89:7e:0f:cb:88:54:57:fd:8d:21:c8:34:e1:47:01: # 28:0f:45:a1:7e:60:1a:9c:4c:0c:b8:c1:37:2d:46: # ab:18:9e:ca:49:d3:77:b7:92:3a:d2:7f:ca:d5:02: # f1:75:81:66:39:51:aa:bc:d7:f0:91:23:69:e8:71: # ae:44:76:5e:87:54:eb:72:fc:ac:fd:60:22:e0:6a: # e4:ad:37:b7:f6:e5:24:b4:95:2c:26:0e:75:a0:e9: # ed:57:be:37:42:64:1f:02:49:0c:bd:5d:74:6d:e6: # f2:da:5c:54:82:fa:fc:ff:3a:e4:1a:7a:a9:3c:3d: # ee:b5:df:09:0c:69:c3:51:92:67:80:71:9b:10:8b: # 20:ff:a2:5e:c5:f2:86:a0:06:65:1c:42:f9:91:24: # 54:29:ed:7e:ec:db:4c:7b:54:ee:b1:25:1b:38:53: # ae:01:b6:c5:93:1e:a3:4d:1b:e8:73:47:50:57:e8: # ec:a0:80:53:b1:34:74:37:9a:c1:8c:14:64:2e:16: # dd:a1:2e:d3:45:3e:2c:46:62:20:2a:93:7a:92:4c: # b2:cc:64:47:ad:63:32:0b:68:0c:24:98:20:83:08: # 35:74:a7:68:7a:ef:d6:84:07:d1:5e:d7:c0:6c:3f: # a7:4a:78:62:a8:70:75:37:fb:ce:1f:09:1e:7c:11: # 35:cc:b3:5a:a3:cc:3f:35:c9:ee:24:6f:63:f8:54: # 6f:7c:5b:b4:76:3d:f2:81:6d:ad:64:66:10:d0:c4: # 0b:2c:2f # Exponent: 65537 (0x10001) # Signature Algorithm: sha1WithRSAEncryption # 5f:e8:a8:93:20:6c:0f:12:90:a6:e2:64:21:ed:63:0e:8c:e0: # 0f:d5:04:13:4d:2a:e9:a5:91:b7:e4:51:94:bd:0a:70:4b:94: # c7:1c:94:ed:d7:64:95:07:6b:a1:4a:bc:0b:53:b5:1a:7e:f1: # 9c:12:59:24:5f:36:72:34:ca:33:ee:28:46:fd:21:e6:52:19: # 0c:3d:94:6b:bd:cb:76:a1:45:7f:30:7b:71:f1:84:b6:3c:e0: # ac:af:13:81:9c:0e:6e:3c:9b:89:19:95:de:8e:9c:ef:70:ac: # 
07:ae:74:42:47:35:50:88:36:ec:32:1a:55:24:08:f2:44:57: # 67:fe:0a:bb:6b:a7:bd:bc:af:bf:2a:e4:dd:53:84:6b:de:1d: # 2a:28:21:38:06:7a:5b:d8:83:15:65:31:6d:61:67:00:9e:1a: # 61:85:15:a2:4c:9a:eb:6d:59:8e:34:ac:2c:d5:24:4e:00:ff: # 30:4d:a3:d5:80:63:17:52:65:ac:7f:f4:0a:8e:56:a4:97:51: # 39:81:ae:e8:cb:52:09:b3:47:b4:fd:1b:e2:04:f9:f2:76:e3: # 63:ef:90:aa:54:98:96:05:05:a9:91:76:18:ed:5d:9e:6e:88: # 50:9a:f7:2c:ce:5e:54:ba:15:ec:62:ff:5d:be:af:35:03:b1: # 3f:32:3e:0e -----BEGIN CERTIFICATE----- MIIEKjCCAxICAQEwDQYJKoZIhvcNAQEFBQAwWzELMAkGA1UEBhMCQVUxEzARBgNV BAgMClNvbWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xh bmNlMRIwEAYDVQQDDAlHbGFuY2UgQ0EwHhcNMTUwMjAyMjAyMjEzWhcNMjQwMTMx MjAyMjEzWjBbMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTESMBAG A1UEChMJT3BlblN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEjAQBgNVBAMTCTEyNy4w LjAuMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ9EE1He6Vr3rDMq GkyRoXO886bT5lmu6OI0aD70QMGhGmWao2fpLLl5nACxfMHmnt5Hv/HL8nPUw2L+ gpBvtHXKflaPmT0GUTxA9P90l08N0uZmdo2Xv4nO/rLXiXHyoNn1Jnwaer8rj3KA 5x9NSkCjuZ4z9lXgQCseSeSMcZ0RMs8hQeETKMbW9uCzJhBtW2Mdw+7QxGZjOIlr jyrCvU/kvAOPovJcHXMRnHuTPdaj0S3NZCMkvGU8cSAoYKDq/ncOHZU2dq3nLxwn YlXjnRHB+0M+5SGs/Q5+PclE0r1viX4Py4hUV/2NIcg04UcBKA9FoX5gGpxMDLjB Ny1GqxieyknTd7eSOtJ/ytUC8XWBZjlRqrzX8JEjaehxrkR2XodU63L8rP1gIuBq 5K03t/blJLSVLCYOdaDp7Ve+N0JkHwJJDL1ddG3m8tpcVIL6/P865Bp6qTw97rXf CQxpw1GSZ4BxmxCLIP+iXsXyhqAGZRxC+ZEkVCntfuzbTHtU7rElGzhTrgG2xZMe o00b6HNHUFfo7KCAU7E0dDeawYwUZC4W3aEu00U+LEZiICqTepJMssxkR61jMgto DCSYIIMINXSnaHrv1oQH0V7XwGw/p0p4YqhwdTf7zh8JHnwRNcyzWqPMPzXJ7iRv Y/hUb3xbtHY98oFtrWRmENDECywvAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAF/o qJMgbA8SkKbiZCHtYw6M4A/VBBNNKumlkbfkUZS9CnBLlMcclO3XZJUHa6FKvAtT tRp+8ZwSWSRfNnI0yjPuKEb9IeZSGQw9lGu9y3ahRX8we3HxhLY84KyvE4GcDm48 m4kZld6OnO9wrAeudEJHNVCINuwyGlUkCPJEV2f+Crtrp728r78q5N1ThGveHSoo ITgGelvYgxVlMW1hZwCeGmGFFaJMmuttWY40rCzVJE4A/zBNo9WAYxdSZax/9AqO VqSXUTmBrujLUgmzR7T9G+IE+fJ242PvkKpUmJYFBamRdhjtXZ5uiFCa9yzOXlS6 Fexi/12+rzUDsT8yPg4= -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/privatekey.key0000664000175000017500000000625300000000000021144 0ustar00zuulzuul00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAn0QTUd7pWvesMyoaTJGhc7zzptPmWa7o4jRoPvRAwaEaZZqj Z+ksuXmcALF8weae3ke/8cvyc9TDYv6CkG+0dcp+Vo+ZPQZRPED0/3SXTw3S5mZ2 jZe/ic7+steJcfKg2fUmfBp6vyuPcoDnH01KQKO5njP2VeBAKx5J5IxxnREyzyFB 4RMoxtb24LMmEG1bYx3D7tDEZmM4iWuPKsK9T+S8A4+i8lwdcxGce5M91qPRLc1k IyS8ZTxxIChgoOr+dw4dlTZ2recvHCdiVeOdEcH7Qz7lIaz9Dn49yUTSvW+Jfg/L iFRX/Y0hyDThRwEoD0WhfmAanEwMuME3LUarGJ7KSdN3t5I60n/K1QLxdYFmOVGq vNfwkSNp6HGuRHZeh1Trcvys/WAi4GrkrTe39uUktJUsJg51oOntV743QmQfAkkM vV10beby2lxUgvr8/zrkGnqpPD3utd8JDGnDUZJngHGbEIsg/6JexfKGoAZlHEL5 kSRUKe1+7NtMe1TusSUbOFOuAbbFkx6jTRvoc0dQV+jsoIBTsTR0N5rBjBRkLhbd oS7TRT4sRmIgKpN6kkyyzGRHrWMyC2gMJJgggwg1dKdoeu/WhAfRXtfAbD+nSnhi qHB1N/vOHwkefBE1zLNao8w/NcnuJG9j+FRvfFu0dj3ygW2tZGYQ0MQLLC8CAwEA AQKCAgBL4IvvymqUu0CgE6P57LvlvxS522R4P7uV4W/05jtfxJgl5fmJzO5Q4x4u umB8pJn1vms1EHxPMQNxS1364C0ynSl5pepUx4i2UyAmAG8B680ZlaFPrgdD6Ykw vT0vO2/kx0XxhFAMef1aiQ0TvaftidMqCwmGOlN393Mu3rZWJVZ2lhqj15Pqv4lY 3iD5XJBYdVrekTmwqf7KgaLwtVyqDoiAjdMM8lPZeX965FhmxR8oWh0mHR9gf95J etMmdy6Km//+EbeS/HxWRnE0CD/RsQA7NmDFnXvmhsB6/j4EoHn5xB6ssbpGAxIg JwlY4bUrKXpaEgE7i4PYFb1q5asnTDdUZYAGAGXSBbDiUZM2YOe1aaFB/SA3Y3K2 47brnx7UXhAXSPJ16EZHejSeFbzZfWgj2J1t3DLk18Fpi/5AxxIy/N5J38kcP7xZ RIcSV1QEasYUrHI9buhuJ87tikDBDFEIIeLZxlyeIdwmKrQ7Vzny5Ls94Wg+2UtI XFLDak5SEugdp3LmmTJaugF+s/OiglBVhcaosoKRXb4K29M7mQv2huEAerFA14Bd 
dp2KByd8ue+fJrAiSxhAyMDAe/uv0ixnmBBtMH0YYHbfUIgl+kR1Ns/bxrJu7T7F kBQWZV4NRbSRB+RGOG2/Ai5jxu0uLu3gtHMO4XzzElWqzHEDoQKCAQEAzfaSRA/v 0831TDL8dmOCO61TQ9GtAa8Ouj+SdyTwk9f9B7NqQWg7qdkbQESpaDLvWYiftoDw mBFHLZe/8RHBaQpEAfbC/+DO6c7O+g1/0Cls33D5VaZOzFnnbHktT3r5xwkZfVBS aPPWl/IZOU8TtNqujQA+mmSnrJ7IuXSsBVq71xgBQT9JBZpUcjZ4eQducmtC43CP GqcSjq559ZKc/sa3PkAtNlKzSUS1abiMcJ86C9PgQ9gOu7y8SSqQ3ivZkVM99rxm wo8KehCcHOPOcIUQKmx4Bs4V3chm8rvygf3aanUHi83xaMeFtIIuOgAJmE9wGQeo k0UGvKBUDIenfwKCAQEAxfVFVxMBfI4mHrgTj/HOq7GMts8iykJK1PuELU6FZhex XOqXRbQ5dCLsyehrKlVPFqUENhXNHaOQrCOZxiVoRje2PfU/1fSqRaPxI7+W1Fsh Fq4PkdJ66NJZJkK5NHwE8SyQf+wpLdL3YhY5LM3tWdX5U9Rr6N8qelE3sLPssAak 1km4/428+rkp1BlCffr3FyL0KJmOYfMiAr8m6hRZWbhkvm5YqX1monxUrKdFJ218 dxzyniqoS1yU5RClY6783dql1UO4AvxpzpCPYDFIwbEb9zkUo0przhmi4KzyxknB /n/viMWzSnsM9YbakH6KunDTUteme1Dri3Drrq9TUQKCAQAVdvL7YOXPnxFHZbDl 7azu5ztcQAfVuxa/1kw/WnwwDDx0hwA13NUK+HNcmUtGbrh/DjwG2x032+UdHUmF qCIN/mHkCoF8BUPLHiB38tw1J3wPNUjm4jQoG96AcYiFVf2d/pbHdo2AHplosHRs go89M+UpELN1h7Ppy4qDuWMME86rtfa7hArqKJFQbdjUVC/wgLkx1tMzJeJLOGfB bgwqiS8jr7CGjsvcgOqfH/qS6iU0glpG98dhTWQaA/OhE9TSzmgQxMW41Qt0eTKr 2Bn1pAhxQ2im3Odue6ou9eNqJLiUi6nDqizUjKakj0SeCs71LqIyGZg58OGo2tSn kaOlAoIBAQCE/fO4vQcJpAJOLwLNePmM9bqAcoZ/9auKjPNO8OrEHPTGZMB+Tscu k+wa9a9RgICiyPgcUec8m0+tpjlAGo+EZRdlZqedWUMviCWQC74MKrD/KK9DG3IB ipfkEX2VmiBD2tm1Z3Z+17XlSuLci/iCmzNnM1XP3GYQSRIt/6Lq23vQjzTfU1z7 4HwOh23Zb0qjW5NG12sFuS9HQx6kskkY8r2UBlRAggP686Z7W+EkzPSKnYMN6cCo 6KkLf3RtlPlDHwq8TUOJlgSLhykbyeCEaDVOkSWhUnU8wJJheS+dMZ5IGbFWZOPA DQ02woOCAdG30ebXSBQL0uB8DL/52sYRAoIBAHtW3NomlxIMqWX8ZYRJIoGharx4 ikTOR/jeETb9t//n6kV19c4ICiXOQp062lwEqFvHkKzxKECFhJZuwFc09hVxUXxC LJjvDfauHWFHcrDTWWbd25CNeZ4Sq79GKf+HJ+Ov87WYcjuBFlCh8ES+2N4WZGCn B5oBq1g6E4p1k6xA5eE6VRiHPuFH8N9t1x6IlCZvZBhuVWdDrDd4qMSDEUTlcxSY mtcAIXTPaPcdb3CjdE5a38r59x7dZ/Te2K7FKETffjSmku7BrJITz3iXEk+sn8ex o3mdnFgeQ6/hxvMGgdK2qNb5ER/s0teFjnfnwHuTSXngMDIDb3kLL0ecWlQ= -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/testserver-bad-ovf.ova0000664000175000017500000002400000000000000022466 0ustar00zuulzuul00000000000000illegal-xml.ovf0000644000175000017500000000007612662226344012147 0ustar otcotc does not match <> testserver-disk1.vmdk0000644000175000017500000000000412562114301013301 0ustar otcotcABCD././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/testserver-no-disk.ova0000664000175000017500000005000000000000000022513 0ustar00zuulzuul00000000000000testserver.ovf0000644!00042560000003130712561117144014345 0ustar jjasekxintelall List of the virtual disks used in the package Logical networks used in the package Logical network used by this appliance. 
A virtual machine The kind of installed guest operating system Ubuntu_64 Ubuntu_64 Virtual hardware requirements for a virtual machine Virtual Hardware Family 0 testserver virtualbox-2.2 1 virtual CPU Number of virtual CPUs 1 virtual CPU 1 3 1 MegaBytes 512 MB of memory Memory Size 512 MB of memory 2 4 512 0 ideController0 IDE Controller ideController0 3 PIIX4 5 1 ideController1 IDE Controller ideController1 4 PIIX4 5 0 sataController0 SATA Controller sataController0 5 AHCI 20 0 usb USB Controller usb 6 23 3 false sound Sound Card sound 7 ensoniq1371 35 0 true cdrom1 CD-ROM Drive cdrom1 8 4 15 0 disk2 Disk Image disk2 /disk/vmdisk2 9 5 17 true Ethernet adapter on 'NAT' NAT Ethernet adapter on 'NAT' 10 E1000 10 Complete VirtualBox machine configuration in VirtualBox format ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/testserver-no-ovf.ova0000664000175000017500000002400000000000000022354 0ustar00zuulzuul00000000000000testserver-disk1.vmdk0000644!00042560000000000512561140034015506 0ustar jjasekxintelallABCD ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/testserver-not-tar.ova0000664000175000017500000050104100000000000022541 0ustar00zuulzuul00000000000000 [binary PNG image data omitted -- this fixture is deliberately not a tar archive]
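# A hypothetical sketch (not part of the Glance source) of how OVA fixtures
# like the ones listed above can be regenerated: an OVA is simply a tar
# archive whose first member is an .ovf descriptor, optionally followed by
# disk image members (the vmdk fixtures here contain just the bytes 'ABCD').
import io
import tarfile


def build_ova(path, ovf_xml, disks):
    # disks maps member names (e.g. 'testserver-disk1.vmdk') to raw bytes
    members = [('testserver.ovf', ovf_xml)] + sorted(disks.items())
    with tarfile.open(path, 'w') as tar:
        for name, data in members:
            info = tarfile.TarInfo(name)
            info.size = len(data)
            tar.addfile(info, io.BytesIO(data))

# Example: build_ova('testserver-no-disk.ova', b'<Envelope/>', {})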
H >B<׺1"""""􈈈 .ሾ@7UD oHoh6Oс[x㜈U #zt֦"Mx67 /4h'4xӵUkZpn-˝/:҂VU.~nvgn>^Ņ/KJ,۱X"iΈb81G *="""TA -O+G*[GDD\P/Bׁ[NO dv{M7 ޚ1;a @g6IO y]h6OTH?vVo`y"Zy*v"۴E Y_[w*EE3KX򘠇0^N&No('q%GDDĜ=PJ1Q/$b OwD$f ǭ+6AeEt1Gw8So־O҄ўbo/*b'fvaDgn`GOF͞uC'3ښU=ʻ%r{B;&ARB->;!NvR|ISKnN7^hzhIYiB="""""K@9b-=ϢcK#uFDD\$Z3.0c]QU0a4wD DzEܛEnCEjZxFwbm=l=d J Z1~QEzs*y"vxOA/-7zeph7ohQ!-*zbN(^@"""b~/&1藀Z"w@CD#fL.^>vVc|HߩT  v ^о""mlj= Q݌fhvmT~Y`EO-+蛖ݱпsٷ~{q3vXdXrYSU=i gDDDDD\#""uDPkR""55c|h+0/z|;"46~cf#z4Ft PIT#7aDgX{wsV"nݪw1ˋxǧ#z#zyWS1%GDDĜKdY,ե#*XZO|= oN~ډ8֝#"=ᵋ|h+ό-L~iD}qDtuFONGRx 4b%tA 'ѷ6#Il=SliJѦhe#E#F\գtN:~aVDDDDDĥ#gGDDĜg,c@D\EVJJekiٝ "’.rIuWEķ`{0U` }A[yS7~O_ɏåVKlh SEy@55X{UF^աV^\2/17N0"9.]XP#""bNNI^jLd MB\v$*PjflҺ[fJVw䵋hQ]5=}yD4]*c0_Ǫ xXM;P1M5=X#T̀[+<ȷW^E^m\3'ZŬgab学q%GDDĜt fx 0|2ܟahkg]ljq; Ǻ9p1ˉJ ZK aD/0;8gu:^BOpjoV<^$x7 Ttփ*>HʳFއKbOV /In'[`c"zDDDDD#""bN6Fg-|NDw``:pJx6>|ڶ|ham^"zDB X/zer0Yaw :7!EdW#ھk[xʎbOݪ`=`|l׃ ?X[ÈηZF\U,:kicѩ7lOUNFWy]zDDD\~ϴ@"EӇ1qѠvf) c3Yp|1 ^܀xC#hNϷ}| C~Zm.=}"MymE-l*֣ojÆ{ oњXV=_o,:6HD􈈈*kR= `|KU.×m~v|pV> z gE 1IsΉӅ/Ftqg(h=%?ZNEFʚ#GקҺ:us盆^am[yPe x}(b3.8/Jp ="""d|2< 0ϷN tbHDX0EYC6ZkX{z[9'1&1xvf#F~G z4h>CXU|_SxjSxS5ϕ}WtgW};4(D檎]`k?+qK@92a]3 :a_[7"JfˉB+m.lDkăڋ} P~(m@b}^:#k͓E&aC{V~𧂿rocDDDDDļqڸaѨr\/wFDDDős3,BZNoZbʉZ9g]DZ哮vǙ$GD,,yU"@Ol\tU}s??CH1?AwXp?EOzu#^`Ё7!=x3w(o{Vk*9Tg5&fg"="""""./ 1'_/c)Q/!Fxq,O#"^Tډηq–_>@gGtăfx'kUOv}Nx=/6EkPe=Na:XtqYҽ.SO sÈQ/ """'ٗtHxx[3ݨ_ux w!-oI:qm=P`mW ǛI{ RPA45pC#>m>zKb r)!}Ь?lj~_FDDDyqol3 1bvˈʺmQcozVonS9.Zڇ5g>ZFFt5w1QW{67詂9R2pCp AKM{b#"-cE܌yU`)P/='.#""*sVu*٦3fhc^9&GD,$m'ktfD:4sFt>'댈.W "^Mx/n^Z+[zF|ja汞\̳M)+ ^"xxw3c^QGDDDyq\>m?g@v''hrh"zD[OEty8>EKHb#p@p{_:~{8 Fg$飗UxUp}^(-ޝ7Uis{,Bؒ5!IH**)n+2w4obl^C͘uMیMOUt@ d&KE$BR7D\M?fw9#B)[eJ:Ea/Y:[*XP?n =5S۔#xPg!BtlWvߕ/9h(~^m AOwu}c_Z.å}%gd=3CtǖO;+>:;:++O-__x)t6ه%-%"Dw8`?7!T V4o+"TLyUNwf' `wJ-?.g^ݐ.5׵Iݵ݊:k!z(X:_H/'뵵];q )I$Q:}` K'Y,>3t[XgJ!K$BbLtl}96:`7#9ngQ.#ѱNҝ;.k!zx@7-G͕wպS/e)> 'a|~,}7H!'Cejʼnd[Xk~19P*=$Tntw,}Ol5>$[>\_N!z!:ه+}f&e?|5BO8j@5Cn7YKoL ~CR҃"gRYH躯(tP,tn'%gdh/JKmX>N$}\(=&1KU>liiQk!xsQ!x=)mk.7u@/!:v+=}B%:vӵIB?g;}锤[[ ѽ>Dws[Ia}’>vl~W-}`>)^Vϯ>\Yϡ&E+~-tz\:.RqKH?*Jtly#@ڸ|TH޶0|Tݡ7'&b5@;lO1!:v3WjHO~3)ηH߳z}cj-[* +)$ѓ}*IGI:I/JKJTZkJ~Q%X ud!:ݏ0 t$Btq\kv<j>#髖:I?o}KoHʶz*DBOJ>{S)tWJڊkwPgR設Hҫ),NX:YO^Ν#:`#& 5+YTJ[t%ؤvEt$ѝ;Wp1`׈RFDOB]oF[wRBtg [%$qıg)q(Z~UY^]IN Igl6I+Btly #@܊qy}nכx[=|/#;aܺu*oE a[GWAv G3@'zsBRz7zDW\?rCZBzk(T|cGRS( k)Iei֝EzI(z@{E~mVT}ԶQݶ7a6o.ZI3?9fdyu;2-? !gWIɯZ$5sR?GŞTms / "}փI0+^ȯ "B~w|Wt].t! 
z_w^NzYY?)B?$8e Fxzzl_WIҿJZ&0{拫=>MLju۶K7 '9n/rD3cG8ZND=sɟ_ēajkWakUtfW-?/{ R|+٧tr|2*5(|1b()|HzyWe#)(*VCD`48GiæQa lG>17]/>a=asN4wϗb%Ǔz$){RqF9D/R r4z7";*ߗH> z%?eL#eΔ|1/ JqH~ydHW⥁-grB鹾$cHv<>E2*x5Uaa7w.5՞a˳ ' o嘑A 7<}&վB;"tg7\/PjYzt|n1R'^!ߔiKw O%}Rxy/I#[/;W"_:ّ VCZx9;K )--}"$<婱9U]mw+l|m^Wb Y~w ѱ>:PHzvvsУ^$`?a'yXz'7BtߓY<A%N%J[KnahG,*%ד>h=YV?%N ߕXt:>*sB>=;օA`rsbGޞR ǂ*!*Gz-9_ YN е%O tZo ɋƶ~_=4@okuTx=,!L6̆2_P,ɖR7Y]RBKC~By+[ ([wte]xXk3j IDATW+C^Nw%SG)[ 7t&;uhu ӉIa)r^}R+^- VC)7}۷9[ERWQL ͇̇UC͛˓?3n$:wkdfY21VUZ[ZL| J k!yh(l1/jC9t;wI{ӝ!!zWz/oTsr'%V:#N:YXA~C/8|ɗuD/DžSi^>fůK;]<[paUpVYoV7*6q#`pdfL=R$~ c>P*JE,$e"?}ºcNJC_j*DOH~؏Ct;ʡG,.ޖTJr I'Yz!Q&]X,- )9~ /-3Gkm zH>j֭$j7;|?PL=LJ]RH1S.~7qvqPg>e0i%eaѵ^ĉOSZ~-lW@ַ^^ҏjN]驔~N׳|"KIqXJlz~TrB9N;Hix ЫJR7RUͱқV6Q39ٶ!&LȷELM,eYBkC痊4wnxv`p5gS/i=]=e}[4+{-D}kp{(zy.II^>؏B]i`锬ZysI蘩E:fPi fy[^χmS3$8|1mW෰uTo%'G .%OR$ V(,պNOZ drD8~V!!tߒT:NzXD/I/Z@L|a6c?W$C쿳R @ y*Iv gO56-v ,BKYV#厥dJ*RT*պNY:IWsDH[ZXdQ/ZHTNaCJqpml7%=VooKY"ҕTk9޻:3[df Uu^4W-K7_mm Nhۻ1Do:}.)L=XN t9WCu_z!wwl}y\zDp5Dc[I'Yz$wRZGR86o-z L:fPV^_r No1־IoBflB䙅tҜ"׾IJoKzy\{Px;IvG 8ݑnf; IzvKnfDMmys[ٽ{} -}~=-b޾xrL %9%Z}O7m;CXK]DO8KTK=Bz+]Vıt'9~ Dc?P:ї>dN}[w0i.=lT:IuJQ!kF|Aa{9/t$.;90uc=d)$EP?7+WZcZ:BGYQ]jh=7]s-5zsoM;Ɏ 7";MV>=\ ?GkZ=uV$͕w-J!x?[zHzl $9Ӆ;f0N0b+U xx@rts[c!vT)b-DgCEqbқz@Rw`GmC֣ uW8=!zH!rX[Zu<)Ǖ{;[p?$)7t@S3o{lXX>,h׶< 3;?0ݔuD%-KC'\f鱤%uH{O;$-mōf"@5Cqxۤ=6m;RzLVл!]zbuL֣)̝ey['-u`Sڌ06Ecy\va홴b~ܡ]\?^,׵k]:^hqA~Vtb@:2*׃fp>.L77}62MBO~(>$oTXM;ߕC'R{ռ7כAyӶsasl9"Nj-D퇞/;'"p\"v QJ4*\54~Ew ޲M17q%i]o[')ʹ3nw`3Fm{mzjW ~\ ocvB Гn-\OZ'W2k%`bimdE)dK),يAKX\DCzjy0b96@ڏ}IRTfe˅|-)"I$%ТnPK:6 @5z]Sc}Tp^Mj7Cf%Dk,sUEąBd[>dŁJn[Zy׀}$LW =t@eTz\[^ͫf6&z}_ՎO=$!ѧ`+Uox"q!$Mց*Y z_'F>[V.H7kv^o#!48!:&bId](k]V[tw/&9t@]rWT[n]} (g,U;R&PS%^JCIw|f2<+*ĮzxV U ݹ g -O]%n*q:iXzCzk|юʸ' I<=;Umon 4mwy{0s6pZռnXVy|<֮Ȩt}sc? V\|~A` !>V%CW*q"@ Ӭ@5z[%j|T!N;F ~4bǽI~ң -lE:_X+Iq)¶;ExNɱrvh!U6,#@ӬFnʽn\%0Ux^h}Bt[wz9)3!:V;R! ɚ逥;(˺}݉ʫo^` @2lgkn٫%Bڡ' Zj%^%u3nT` ѫ`9__ LoU|վѱKY+ۃ uqTXOnwg)4()|)9V )$v,(v} qKuWAz;f%e{sKE =Wz_u W'i9.T%%[GFtD#Gcvtr\ V}^}\>j[}yc*k0c֡p/+r./@B"sNo%n+x.RҥZ)(2s uv肬;OnEs,jV^w>IP^_o^"D>(}gq^|b}( e%Ws>M魂HU%nIZ."VSD"^%*q^ + A؈Gúe7k5FCi}ث$yg 9YME+r|GwI~HzvۋzqVuCEaY:(pFxP%"@ܪzx^]9Dooda^}Nyd,0sI%BJlɾSYK'%=OX7cvUd](LT%noE w:`#f^F^U$ׇ}p2;z~\>;QFEW9kǺ(綻ؙpRVąZ)BW+JU[ZҍRJcu*q5*D͏νSqO/묒:wz̺b_("IKd*qV/b9t, ѭa5<lnoۯ9zEf[LzsV+jQֱH#}%%^y8։uv}^ +--Eg kǜŵ*Kj*:IfmT.~V1nVO* ͱ Ǡ_(s鿿}5JfL'}O:8:oeHnNf0pŠϗe[‘ P^%#tuaX^Fo ѥ!1ϛf߇{tTJ|m0\R &}t8uW/9$v}+YRB*JN+I%Z%w \ -tX^W%~U`i3/9tfi y5o$^7,@nUBVǏ w=xOoo#UJg#ltg/bd&O<9)t)ٗ J1XL)^߷ kax/`!@lIBz=ʨ:_:úq'D򞺟Aa ւ㹿],pe-8[.i8Hw%Ǚ-%_TKwN ./+zu5Ed۝"<'ŁX X;u?H1]`6ۤ=f\Uxv\} [pKto_>gu1+N+Jq']ǜzaYmW!B9).$ұlI$v,X:BفƉ$ktVr2|Xy{qm wBtœz~ >s;C>^N+KN>:$հTJ-](גu-ȅ]&{AʋG8A6U*J=%;Nۯc=zOG)cp-7W~p }ޏ*}'weg )|)9V )lI*5IZ]b}xj~ !`[m,Q!nr}Lc ѧIqv}Ⳬ`Zu-N!In$ݧŏ*q]Ni[K Ci+G^L>=haoZ P>D5[ffF0Jw'=v}b[F.B ]j̧UK7G[_9Njtiu6Ϻ!z F۸Zv[my\ .jOhapvQ!jU[mm཭P{}_iw4!@Bz5z[.WǷ͛WBzWv Bt*w,pkavWJt`u\]:~\gB86:`$Dݣʀ߫1ѫկ3}39tN4i%j}{Wv?g[SՃj^>ލ`}Զz> c C*Jc'H '`svGúeoW1m4D&Q!6vzx^]9Do|Ԙn\c߄Cl*0sݠY^_nևV{(^O{}4 с bT^ӛAt>Nj\c' wDؾ `va!zU.Ә&LGiׄ,CpFC8飪ѥ֍nlv]@؍ƅͰۚ?lnKbd/^SzwoyA?t^6*Dúsׅ{}T,CM7|v2P ߾iIt*:`Cz+ݹ@o mi{~׻ri0x^P+}nuLt*771=t~}TUz[zk22i!& {>F5݋_ ѥڼUBzn U;qqεȕs)gl29t~2*Do.ۛAzzNѯW]^|QBtUwvt~3,DW êG>,Do;B)I;W2 7|JZ у:`?j y}>* oVOҝ:78&QṴӏ/]c? 
N8 ,IsN.3&IBz]nf[ä:N[xnI՜/W$'I\W>~ymLD٤i\6fn̫м Ћj_Οŵ'&DYB3GںM9D/nU %9׋py08;Œ]t8|"Is.Z/mE mc ۂQݻ>}@:ts1ϯ}6Uzsׯ/1,u_%[.|n6wS`XR CCjqWWݶ7RR+W|wy7S\KaN]S4ҝTvtnhVח!zv7CWj*w__)=$kߵzԟ7& чuެFս{3Dmfͱϫ#+iJ߿8okS\K.\_ѕvtn6*Df룪{9XU]{hSgn^-@'׮- 1A[B*F~r#bp O%e*0s׫Gê҇u6z}e }Uy}+KNuʟ\MKŽݹϥtJѿs0 7.Djn}X^>ߕ{5˗_+Wޛ:t _>Vsq]mT Y]7~ ѥJu˯`e),@*0nJGJ  ExDW!x}X1чmV%z uR8_CJ]Y͞km]7ү˯>xʕ[}!_>>|Mj Dl:tmz=n-` g׏w/U׃_n1DttMڏXs X6`!@ִêݲ4-oVO3{ ҍ=I* օiiS49ǥntn][H=,DwD$>}}ƍޫ|y֥FCcYCtu.0{lL3/C*5&z[w3W7[ GVqkLڛ]_pʩ.ąj^8Vrfs;ҵL+ՠWYrpFӍysMjSurj*Op!,ܴM+wv7{ΰJ ]_u^U~kļRֶ HoUz3cIz[7ҁT?:\C=iϵ{L `_v7{:5.Doê۴Wj}(<٥{78}=9&)=p?8T%ka򵈯{:DPMMA ѥc7YRe[*nh/4}^ٶfUqn H<8u%B*:\@kiϵQ{s~w1,DW!zu|ۼxeXެ82v5B?)^8x*ы. }*\w?=T{딖Vr>7iϵ{2%^`"@`MQ㞏zTuiyg1ⱛB=)BϧtR,J?/F`Un.tFsz^ƺ@^]VBzF41xС=BJǫJtEmˑ.8ݵvo+:oow#w1:[oXmmcG]}XA]]zܿnȈk*+|z.픤:=N~_ػ@`뵍E֕{[qXVǐ |K}+>SH/-M_> |p.I_bJǦ=6i}cgf9܍K6B"- PDXGW(FRȔC7AE F8@qtXaӀ_/ZIJŔ2\Q5535;WObv~橫ghtuOZ&U)#pYGϯ^ƃy&^DnFD?'#RH} @D`{lp4۱7}sܛLܷ7֮ӶsVY^\,҉RJ2_\HY5OpO?շ~}`3Qƽz:^PTmw;\>"*I?SčA=*Oő"=Z"=͵=t: 85p@7Q1~һQS߳LD?Ik5M޳N7'MOsQQ)Gvc~_Ƶ~c-h(ȭA"ZóX8}7^{O=7E5vMQMUz`o2n~Z^[ڳKKJ3n!Nx-9Xg4(rgtI}vl_}T%)8^|g}7#zqfY$!]xuދp`fM'7KDo2s{-}3l3WR,NݝD3]@-lo\9p'sīIgGiv>~{;譔AD/S76ot~_B8|tfړ&շsYږ~JDoߍGRJő8}ѿu:Dw>ʛ2up8 M3'=bRDwo|#یkSqƠbџYl _mL߹re0p8 ?4cۓ"tv"u{[ߨLck]_q,o=wtvsы4-8y/K@c3«'M&ԧۧ5=F͛eyGn'|=o.ţ3] t}&2MDvZ5&=g5Lk|u"D^I[7s'Fn=zt{=Z+Vhݍx]@ K)څ?~㿜: B@j'}tb-^ymGHKeh1.pPHs/_B.i6=$zϣ {F9ܶ&ci=뭔aD9xk(Nrzv-p_&4gkں}k1>r hKKgSJ3OoV݈~FT5A^x\<``eR6ϧ(ӈ>f)"ND/ы7VR*R80 $)⿿S<`/`yk=S|Fϖp=}&^+GF]߸u}jD?}c0x{xp]@ "O}7s(I)>zmcߛxkzqki&vqcc?\#j+EN)"=.X>ǼpL3ܦN}ϓ{}"͜oDtF9xH*N]YlŢe_yy/&s=o\~$n4I""ll7?vt{}#zbHiH*1(9RGv3?+[E/^x_:wy!5KԞ4ǽߨ&Fq[ͧ+k;t=/l漑HnIDJZCQ|˯/{%0Qz3'={ݨH^JIDD8u,ۭ.oE…g/^{90+s o7ݫ ңcƭm:GfAGwR:ӈ.Fv>Ͽ'_ֿ F{ Q]FDq{ĝT76U~7]>N~MϩG,"bsd{P"zkn]+#W?'Z3VJ6sW[)=j[7eD+K n;r܎U~y/vJ@mڈ^mmګ!}dD72܍gKD[btl#絜jHG'o{NJ|!pp#R-:|"`^5*OӇGm0&k 爯v&#-"nG7)?ZQΉ")"wy "K9 6_{ƼM@aTDn>i|sv27j"~i#n%n]9'RjMxmBKkw""=yk;Z$o "Wr?};$1)7Y;kW}^U]0z]E89"vJ'҇oRgC]*"HGҿx߰E;˸5NG )2}vÉV<>z"._yۉeD󍅔N,crhSEJK\ vA/"z9R\L9o(sǟ{xG@gTDouMOν>|^u]]_!4D?[]݈r,b=#)_*Vry1ltluPяN~D"uAos'v_~wu_~KM@é)W{$`zs #zVC}O ы2ؼp=EVJǷ~x'";짔)r'EwPϽڼ 4}O7ۿ^O}-Qy8y}\ڏ9͈^z IɻbDEt+EsD7")Sދ[v[ݩF@mڈǛ>;Ǥsv3Ėڝ3X(~J(b=È~f̗rTl|vxP":9R7r"u\vSSo :P?чa)L'ˇGt&"\Do'9"RZ;}Hgor.]w6%GnE\y/x0t bs۲DΙۏHM{.gˆh"r%Iř2_F؈w")A귮.?k^0P5Wt?=y>3ѫxQ~qDonO;^q|ыtz=9ZT7Lo:9[v[]S&U\Lu' IDAT]:(>n&)^dEmG"V.Z*.#ND󕥔>vwg3"z)#u#~*Re%A/ZKoݞtnڈ^M۹}aHoD/NOng=VzW4o|5"N?x/LQW΋ݟbK@yըؽ>͵w#7jmDWg<Y#z(RN'עGtZi`VsD/EF;ˋ9b%"z)r3VʥJzp)`QFEj4usv?}X}\8ֈ~wVz_^X1GDQZ䫋)n8f=j8#݈F~DDnQtR.q&E]ћ4UvzۍóգaӘf+Զ#z:q̫#IE/SOesĭ_~wut`q=ޙC|v)'[gl?:.o4G<:(Fĩ=?lFD/EtrnOEꔹ)^tExj3/u{ 8Lt`"z5LM蓶sѫAqk߭>}z^ɳg^8R3s"v{aG{O2G%)qjx>.y|T@z& E>i|-ѿ+9wۏőY.#kH~ok5"V"G7WD+sF^+tXY*3_F/=CbYLћF%px^}Q۸#_qik w7gΝ{aֈ~m\=::kGĻэH܉He?\v(zW{̇jMG܋M,=YsSD$ǽ߱}^86CDc_яN~D"uAos'v;lG5PoWt j,߬d3ѷчk'EIS["ō]O?RJSe7ɯmN s"u\V/o]\~` lWu#F^ݖ)nOs&N#9ۉM oﮯʫNʼnI=jDt"":eJ O2T;GO`t`ۢ׷G<^}ADQνIߺNJbq vU"/H^ι-"z7Er}o[`OhCt4g vәчƈ5N迹{"A#BHAwNG~DDnQtR.EroXxPt`}x_[oFqS &oUޯXXޥG>tvon?T@`jeD,GD?":#-ΝK7N;t`7'ЫGE裶s'v>ɸ__c~Vz_칳/h^G3%ޭO;SVu}p__`2.Wc4|sbzujv"z"UK}?>w1_ތF6#X"rN)b9""-7r܊(Q}#&O׷xo>Ԏ;͡aD2~ycs{^s~2ǻkן^l?،^Hȹ)s٭O/ZKoݞ{t`M8]5`uڱ5ׯ?>T/(.nl+=GZ-۶V^qknDSDM)sr-诖_}?'¸L{<ŝɽq۹ھ~6|w.onnJϞ?ՈGr9XLE+02"#IElN <$EܛFnUZw+?/oU~.*oծ?o}]?||ˤxÚaב#U?=ʈ(ϴϞ?b5xqcK ǬJrr^ˑb%GRN9Ϳ>{O _᫺Q%:*=V,u1НI:js[/,^j3"z)#u#~*Re^/_n>Gغ{uwswinN)"G{O2G>%^D_-;/vދǟSSn:Gw&v"zuF}rD湅v[}u1k؉KEox;G^)"z9`ʲ׾Kw `m'}ԙݫ٫})"z=Wzu }q'owW^}A@aڈ>4>.7^2W5bz}QBGZ?{~֓ Sc K9S:e.+gA/ZKoݞ[~N~q'q~ŏLX#l="ލnDGND(E'[D_-;/vދ`'~y9YDm1.~#YoYZ:yO.?i#5Vqoa@NoF±F x'SJʢ;(Z^}m `d;}Džx=j+D_<ï[o#'?v-#{FnDSDѽSLe?a)7݈7MקѫGs#p)nު_ [qF 
GOG/NZOX'<@fDZmseN9zQszoޫ*؋4[7juzONMg'#9"ʧ~Ok`Z"݈׷N(:)"jyշ^,섀UFQg:g|VMp}QFoLO07MQmEyǾMN2"#IElN}[p^6*1GE,砏M1y"wyĝZD/+ ?VSkes,G^αR#z9^+ND{sy/"N#zz5j׷q.41^Ms:[z.kω(ҹwBc9G#rňo治Yf \Hl[Woƽ60Fi#pʼ)74^};G5Mק>>8:u^vDg9y?itlgGF++eY^]X]9^l^^^FyW_~jli ~M.WS=JSD#>W5a\QN?r|OiZH@Y渴f卲\\f~Sn,ܼ{}vOo7)G4GQ 4}f ,=E`^z91vUSȬkQ}tqVnɿ#OS:=sk9fΗ6s2[|i̗o{0?7o}n7=ܶbm3`t`nD5> Z믭kn4|za-gcK?|HS9b fW6s\Z2_1\^qo\۷エzKoGM7>)Z0%؏M[7Ei=LL4Wo޴S{N;N3GS/Ԉ592 FW6X|iWVs^yP^m.ѵߺ|jg&]ckشMG]mj\D5^h>}ܮL:f@~GǾ퉅SEŢxVSEc]YF\.s be39_Z+7W76o/k7.t)>Nvwݘ\oZפ؅sxt`?&GGI[7vƄ-G^>rr76BΚ&GsfӾ%:!~@^OG~eZ;y,uI^,kg49;;]708EiQM[ׯS~8ۉn>5;ޭi|{T!b^M|ܖMY3sdtY{N{ݝY̺Z0#8h's݈ҳn>z9|7^'#Ѩ>=i|y؎iNnl?i<՞:[|/<[߈~:&A@ ~Sj^yGH+/ CH@8^}#O_~:"= 9."H55:! ζʟ{%+"?F> p.|o+r9-{={p'6b'#,"Ast>,a#xDz`O=uةc ^~%S_ ϖHg"^~'#OO#Eϕ"E15:> yz0hBd `鏿_nܠ?)2`yOlOӴ9!!򩧎n"p^ЇB@ "t"B@ "t"B@ "t"B@ "t"B@ "t"B@ "t"B@ "t"B@ "t""=gOln )m>^t&LNq&"RN#t9FSn*V+w.]/}Ky~/>Z"cEGr9Fy5S戕b0xWW/sl_)'"'RY>S:"#XG7#b%^ nUv/s,L IDAT _BKӹ<x*x<眶oAq-t="noF˃V{7/}哟\h߼LNĢRJ'"#O9kq#xl7s_{jy B7m1TI28cc0yy%|TMit*SpX? ZK0F[_dDZU# ylyO!F!g`10PKLkx४1JHee--0L>@KFgWdiޡnLOCyl8#6׈72Jv=N޸qK vʫ-,ݟHw3z҆ ;Tda2x 38.[ @cnhKt͛[b~=`1 Ɓ91cb|m`z M F0 z^-ڸ_'j rӥ ~*R*])#|/`p/o"~f׮-[v^_+ffA%%Q ??E"M5U򊩎_1 /bhgm{xxm*++Z[gás|B&c̼"'*j !r ľx=UT߸c:@WU7n4kC4S ]%Wo]ߦnڤy9>Ipw<&X8+v~|sdӁ])ڛЕR oD8/!_6=xх Zs3-|O&2ӄ.V8\uąCoOs pZ&nQBe;g0 `tS' ϟtP~br3"TiiivFv _Bb;&8 Q}rȁLL%M`z2*ѵFfLLĵgOVO0rfǼMiU[8H:g A^fz1d[q:ep馃hexqnctPW:@O9x8#TЕRi!-P9== #lE*j| |M$\5)|834vEAcT]R0"1 ' WUYu#,|ގDc>J hZ=VupQK!M ƟdA}}tPW5y~u vqC-TSNN֮,l)r_Wµo/ݣU7}ik~MozL  MKG(-R⍶RJey"x:bđB!2,"qS$\#۴˄1<7s qQu_ST8?DǑ53UUH$ ?@B W%t5W1 ?"Hb-L4/Ãj NujLJ08&۸R:zrHK;`_!oq.w/n0'ۋ7QvX9eJg ȔnW)>{ڲK"@Q_PmJ#Di|qcR릍RJŀ/<1b$p{ 1ɬ<61lH*l+.ctlpVEŎ"s=gJ}zp~e89vtȲe7}h/e8}x?裗{vSr~KdII ћ7E= $p[.bp gΣhܶ㨑N_"*o͸qqq? Gv&ݮ[nKy:@O0; 7oRR*m\K u_j pidU||[$\.8a>k$PD a?D"JǨ+ 88[%\0Ued[(%`ĵ=noS<8#^אC+ n>$"ge{&rpIoasmNEg=$nܸ1xk"7tJ9=L3p1RsA1I剼nC pGQCiñJ)ETJ*++;竨f8 ;<"_HYٜ1IJdN1_aRRR*~I9x]8O:F^ 8 sxNfVF?xt[M8<4_Kx*yeHפ=v@MAOYOohGwV{USNNWiiӼ9>k+1Oa65'ڶ届9k rw2- *c7<"ϕRtJeOz]@P+ݓL8 _Uvvz>o̐aG %c/\4s5Bg.Cj?ԴǑ ,+Ϛ5˶&631W|Yw21ٲʤ<$^4 `9R,; ̤5yNwURY3v|uCyufZى%0%r{aʒ"t]IQG׌{%c92d[V9S:F/(V3>F'^gG@}fܸ<?+f|^q;}=>jlhP5tUas{ΐtRJJߘ(R/Sh~⩆icg?=QP1 9M GCb$x?oH*"k[q˂322k@(_/)1|_:%93j#KC+j r_;aq=J땜Ckm $wwum~,79g?ct]KWBΚL"zL)ltTʘ1r| LH--->_pns}P2kw#H&3rG!;x*k3wܴah@[sWcESS/tH*/(Ȭs3#{ӥQvtegMI02)''K:FLǎ*9d /c%T0@Xw ЕR)=9#3 W[FH>(K}_^YYyh[[ _˦$D竘 _xVk-Vb|kg`5pl_ /wŸ n&5ƍjGK7co?JG(': 3ptObbg&V[8F4\M~QЅ-63矧,ݱZ:B)l7RI< d@kSڽA피cv.tiCZڟe nJZsP(-~7΋ b5ifΙ3R:E n>{3<FIRHF q_閤CxrMJgV_PpHmA<+&I:DV+-T7wTI?#Tl>jT63~-эM]~tRJNJdF@/^50<tm͛H]ߠ++**j{hJj-[-+))H/xC4/뛸f>=j^$x4JG$1sFH$#fOAiW?1^QJ<*F<:d5yc^mD9t99t>v$hD)HVJ%s2(nI->_Z9ԋ3C5^ΊgSJ`|gmHg2ϗe붻nIbQ={vZʌOmzqzEEq ϳte[/suY0xtj |@tKU{ōo-QiP- 2ksПkBzp23[ 7lX+]RBJ2w;wtX" L:Fnomk==)j?8())<\%LFs @bt➝9cFhqNJUY#@HՌϹUK7(3nHsm&㯯x,?âh{ ߑnIBƋ#TM"tGW ]PJdЕRIcܹvnyLW Q_qt;Ů=09y~W;RI(v} \ ݒ*8e׮֥Hmc|ziiivFVQrF&=&? l_|p1PSyP:!0;zz}.ߎ8S%1c]A}<^. ҝzRwCR*)f8[%~PWVt^^^qՉIAIϖ TK|  ;!8@i JqXS_D@tK xtQaf6_՝nܘ)s`=ꦯtڂ_<鎮o;R*])eP(df_nI@\YF t `08@fR`ኊCCС#.HYDו)ȕ[xJKل>_ $=)_jƍ# nIH)Gڂ1V RMMA#[I,ݱ7zqߓPJdtn};H1,""Hn w~9eDդ(J|_xtG#2lNJv %8'dG PRR[n;v0q0KW:]U! fg!&?`~}U=@Qgtꟺ&Iwtń׽MC)ЕRV*2\# p +-zt@/W8?9)p_>+.;! t^In gΙ3,~^ N6숴}17ȕHG` UaZ;/E \ K%Fqc[VN9Iw?HG(TBJ)kٲJnfV}iBLnNv O$ [%0_$YYY[a _I#GddD:"VMg)h}j 96!}m}@ƍ83A le5{!qMMJ)fR:Y(?p0pW92쒒̮^N/J:زew;lWRRfttKd8w ϟ ]]蝻}F%0g'Hw$`)A-)ftRRJw QdV;vh8a?~!6/J7 ;h:lgJ)tNk{tS;ԧ< ^I :͈ٳg΄K/MۭYgȰ#B aÎtDBgOpwaHLk~,yNH5CH7(u P{)7ftD2Z3=#[R  ׿V/ݡnܘ)4S &Ьћ7H(TJ(TW~W9(Č&>! 
!r}Y!D;%wx<xڣ 椋\*b@ ĸN6x_v7z,X}_,-zHvvfL|3N#D+hQ ztK}`߳e(qPCb|D^NaQn %-h$BLMsCx :οpѝ0fm(_Fc o6]"bdh PT}1+I$gM:޺-/9ːkشHƿKg(T*ReeeG1xtG3i"8c²9'zp.:n8ڏ`EUUIҡ Ǻ>4K~p8tM|>a`6 Zb@ҥ~<YYf+ |<0uDD:w;w,Ր2C m9Mb8 5oMω'%O!1NdX}Ӈcw+?~m35 !` 9N۶g%bma #v248ُ8v|YE IC}6ΑHIo:>CZnoJ TJr<w8R: *coXT*++;<3 \ +ϟtK\vI>8(+cj x}F+ aHX}0B/KJJ/_J3_DÑp.,]x5܈`.*ΉD>%;r.1褝Z0J$?#p#; ١Ad :`ܪ"rƌʟywSbf|$> ;x]ԹR_]7<t_s!|L_!Ba?nw]&!#rӇ`#AgUk rDLWo4nq=+NްaAE/`Մ|` L) b$N l"o;o9lh3 9H'p,'wNBtGW |rssʽVJ))6TJ?8%~ }P?ss@t-'pztG<&_K4g8/|{$WTTbj s:MI Ay p1=$ݱ&bǡVUU \na IDAT|Of29⨩P_.[tG Zc"B\/jP(}k_X@ ' >IYwfiuu[g>-j|T"9w:rQLJBtH",)&Eu#Yiޡu1ˌ3{xIzjyc vCoNԇ8]Vvn{;~|b踝[+V:Dҍ gxNx!zAB=Ckg ቸno0/_&1Bpǡ[.\ ,yYD|߷0bw Cbf7`ƤOK+Ț 9AxtTN{5s^(hI^{)^mRqg\al9'77H(; pRwRjjǑ" smU70(xN˘q;ݾ= Ȗ9tϕR*RJ%Byy|}SoOH^ ߔpҖ-ۮfz1iUilOb09G.95W\q$Dyyi .؃@[]]𕒋 ~-v6lxpqdžU𼫥K, / ]V2X|&`yELZd|_5(oOѪSׯߚ4b-?`fќKax\yyD"Ռϙij" p|qC=L:k'9Jw|6stRJ"+| r0Oj7zM4{2@v>EJq̙3⁙Sy1 Ͻcb˗/w#`w"a-w3$B!/9% tǎwKϟ/2lpt&pbKJJ<1sN۝$̂ GJ hOʬY {!Zǜ5q >싎AzI3G(]Jbp]cv7I MߟWqz6,:xq冱cSsm_}@6ޫٟV".-nh_TWS:;Euoo'Ѿ_RW` LmnN{XJ)%ƊJ48~c3L_x+~X/ҍYHG{Svz#T-p$yG}blDۺu[vA8\=st 6;gٙ1;lK#b /)pXp*`dGyȓٛ퍭T;vhHwts[٤@)nh^f c`R}6uӦnR@ظNnk ׾ݡnU MAtA j񘋥#lPƝnUfjQ}:~揊ntb4uL;X:i] JU:@WJ|gx˸޳~U.\m= V|ճfͲ~ t'aBT/)@5ptD"͜9s0B“x*XEuufn閮ZXwr/1[R~!Xtuu έMFI@{L0NJfI揊bǜAzMJOLx's&74&ohX)o=@8 ƕ ϭxtA\>KUǹS3R*񂯔JCub'عD"[sz3SnzmN{vxH$b틪H* %ofa07mXyp8g¢>e7:=1¯#UۦG" p>{/7GKg3i:'I^{i`ON^.@sYa}µvC ?#E8cV71Dv g)oeⳋL&?Bf)ݱ a)JZisCX)e7D]4YZ@"ײ+`3fT߹cǻ߱m,|\Wmٲ|D9gH0Ol ly;'&鎮jk#Uò{,Z HtD_ՍϛZ\۸cXmnn-ްQCm_q7AJ!ot4c6vޟW2}n[i)D7]p}2Ju:@WJ%C‡;zbQUcMcx3k+d>7rQe  q*D$AEX? avFQ}EtypM7ֺ,|;"ie 리kZ/t Mޖni=@_3>l_f7o{H.oǕIwti8:+RJ)):@WJ%T 8>.mÊ!ڀee֯zef(zHQ@@|/Oeeǂ\@S$iw:ren㗎39ZzĈnyaٕYձ};}U@Ï/R*{8<[c?0X qg+rS೺J&kG !;>Ÿi%R@J2L?-͛[:7ϟ9ivo1q!I>@g˸m__t[~*Rp82pfIGē#êw^^Jw|%%%錾I3,X* C3|"ݲOڂ%LX5=€LG+Ltu?=eE MeVRy2L{>*TR*a***P&ݱ/zE/׀,{BpːΈd.ZxqRm k@?C)$H=ʸkȡC8O:/zOnI$I6WLӥz'otRJN|tA08"+ݱ`c} AE:&-y3m!J).tJAW8vmm?A~ cq8ϟ߆d=OUU3zkٲw`#qRrEJA/Gd >e+ảD"3`tEw@H-ܷ8X .!ⓤz^TI–cp J͏I뛞P)''nv{+RJJ#2=˗'M`Cl=%G2@gfjt۞2`tA0Ɠ<+ЙoD"3b0 PW7XsS):^qLmaαz\Ω -ݱ/f j؝J%O2`baפ;R*]])W>0.^{KKJn{f'1].1a<,X-k@x@-jkZ|1ݲ@sD" HwtJm܍'ip0#ȱkLeeet)5q DRaFc/5[7Džǡ_H7txpYnss<ȮRP}Nkv9wJ)t+"ϕFEK.m˗ K0-&NZ Zxq7ƕ%`[C} 7^հa+l…=4g>KgÎt><zI7خ-R&{}W;b69m\}S5P魝HBÎ3}ŊR)JJxt@'v]][Řz[ ^KI *a K:#^1`VZz錾|࿇ZنXp*`pFv=8GY#n"ݱ/f'O`#` JŶ#8U:!^ nl@gNU?fLZKJ)t`08)@ U Ic[__:JGě-2Η+/8ZB/n%醞r]w̟?#錸b<&/+pPryno3"t^ zؑDHc_{;8F_mltRJ)+u2.R}1n.gUT3{NS-p84붥e %Y by I'aƖ&m 't#z9Gol.DJ)kE l1GJgy2;b OJwfB4MzR3RJ])WttA6ױb5`> /D@`[>pnIIONEE! 
hayv6Y$S L={=AvoNH$A#ޖ,Y1k;?_`HN+T?xY+c爥SFt@~鎽\P:C72d=?YRt@ 0 |NO,X(Ȟ}C:>昣l8c9!Z[w?'zTr]|@T ø_:Ӥ#z"3- htC8 &KJ)DdyIe% `%6P.?r;r\lRtT\ {0qZ=I08!@'P(d?.]N.-_"ڪ֖8C'vX 鱓Mj ʎ8d __)5ƪ N:1fI7ztJo+d!3~^qJ)CJ`r>'gѦp8 |c#zlɋ3e7rDB1 ;1sΜSABln3tCR*9[2å}A]|룖?IG !oA{J)LJXe޲g#$&;:e_d`+"ϩ  :'sחD[#VH!n[Z,X'{[[8)QJ[ {q`7io)3S܂`(M^3RJЕR1SYYy(;t]00K7u+Йg)K[ti a`tÁ㈮>J:ж DŋIG馞t0}$)N% IDATPJ1طAjT6m0nP髾bBԣ R* ])3mmqL/K7؀w  d͍N"ȿ#l@ ]tsڀ b"H;;@T,VIHbMDxS:al9S:B)cQ0+E?o0yCާQbC q|Wv(+b?/]nu DO1N@` X:8'%%% OnXDv?n Uaab})DEL!b+6`% J)_~_a&tfk ]1ctJ_G5̘7yƴYP)ЕR1) {CH7裏^ {0V^>{tDO1ضXhA;8dȑ#  1=H!Z+ 4[:"9dMY!1Ʀ:dHGߐn4x෥;R_I'|Ȧ-{l턱 Ʒ#TzϙLLKw0S帍?PJ)s:@WJĖ-( zآsJm+Љlu6 a37c 쌖nH"۷[X cEauC>q;@}Ⱥ!.t}:h0$.oGr#s NkNH+*68u3PʺgKf/@WH6W1 dC_s*/SLTŇ-xBF|Ʋt":7trODsn_[;Q\=0#I =zə.0xgx{ GNnHQ)޽c:F[7KץB :Wk8ٺuM>#DOg2{Mh8@'3@^tUTx4V755#D# t"na >,x\pY`uC¸t_͉hu׬"7Oٺ .n!t&&*-U% *[l "dIsޕ+W>cv~ I@z˾}^a f.>@!ǗzC:Vg|y':QiiƺG'PIZ3gN@8e DDT@Ф#hڣْZ ɺۗ#{`:\}+S_@aB6,LDTR`EDI$9nj捨 tLnH?J/)  8ͺCBӺ(8hP#7iVwKTLn"%[7BXc@塵nHז "uvղ /e@D&In>T!=^U8:)T}9rcT89V tոFjDQoGXHTGlkiIW7[B߆n^۷[Qɤ' b @| :[UG d FD Szuw"PoLE#۬#oz.~ г[@U[7 PG 806;j/"$H\*3rZUiu%ˆ/V 5"*Bu|0k|v+pLzUP3; X6ҙ OQa ~̲!,v~""ѐ(^NUn;vvD:k;9$طoǥ;{@T'zxU5j iٝw޹@uG4\ *&Z78pmy`^9:l>x?1T>;t$"@R[n^؜zsmu{*O+PE`61֭tȚ3GdeA j_7ЉhNpuGX9@?ICd: ]>eE3={R9qOT6ffj:[Ks56m!h:uB$r子?VJꇸ2WgGJ!|OîKS7eɹ7?ahqND&"{:1JnN>r^!}˥t:8vf#Tڢ裀Z]#%-\CDD: A4޺ G4R|TTϋ\EřLe{Y"FAUԧ8@'*=gqO=~%[NΊWZ[k]CDR[5 }\CSں`S&>K;+[7o) DDeV"J,U[~q@ˆuC> w 2Qi@TN.䢿u M"uꛕ!%kI\R[ :0rM(u.-,6]yH~gZ'Px: kdle%DEVuV uDښ_xƉ4gV#FmfZ7Q1=Dt6[L9뭾D;@$ D4h./X$#4* >ryNpA'TrNdnuHƖхDyzռښnVU5г;zh$nu0&% k Љh(y$Pu4@W??lWxֺl7rt" ɾ.!:]ӪnD7+Zj+A *]pE&쾎€쉢Gm6Yq% bnC?NN arצ$ *W̎'!v-C&xc$--骛VuQP@kVoP~b&puQ$:JSv^ :qMHGw|m"": n]pű:0)eAAm]ӃMT* Dlf!(Piտh:uu QkV5~0ݺP9:!_ Tp Dmv:ïODD8@'szuBٵkЩ %*b}-ȭӺܴ6ۺ`$ʵV_mBT2 紤kPxuiݐGGN*=obi k&M2t",GE$f6EdukCw{N(rN@%F\ ^ ښM>:(V/ҊPɿQM KXWQo"~ptס?ۧ}k_;$xuU?t m9@pQ9Kg2;Z[ Iol5= $0}J]kmC:+X'Pi Yw7DDet"1c:|~ke9h"#l6U@ DԷY nћZU6#reg;Pu %TZ )RuV6Zgנ"pq*RAN<#,h):wu[l"y n"x豣oXYZυ^gBfS?8>-S9 t" |g)Â4De6_׃M䓪p(6mAѺe΅uӧ̵!2ꍑnZu}#OS;hpND$|FuCRѯ  &QN'$ D?3?D!2-%DR[}5XJ  pʋ_ei7@QaQrD 8,;9X7$zqNiUxyPpLNpN uo}o@a0NZR}u pk^_hYTEey@~y_\ ahYOJ"*3q[QqND$DQ=ok!*Րu} .S!U}JO A*uP ?U/*%La,tR+7Щ,-S,uK-DDT8DѠѻFLUnSag_'OrNt2Z֤'&w\lSh :]_3 ~~n%ښzuU4nS}=KRF_TN;E`BDDf"JQ3=0bynQADQirl}mSdYЉJ#eG}-ZZ]S0}JB@u~Ij92Q^ qk1JNK7Ս "%Fɉ4d t*U|ĮȚ|Y )O'Kwy"*kv%=Gεn*$QY>]ӉLu-Dnʔr!XN 񭳶laC'V!3T7#TB|: t"H5mJlU{XLUdHG$R/+ǚpXG1<T%*w33[tq* K{ IUk̴u Q8_<EmGw[PٜIi Dпlr[h8@'A٬wE˿91ԥBrte o_+"r17c͛SiJjvu QԤQ[wTP!_Hg2{shPS;K:GD(A|JCaՈTQ$~ T ` Q65)aS@w\fBt"Ӫ Ǭ;NLBvMm<ӁDck7ںt",7㘯gh;Щ@?fl?bʬdf -J ӫ&[nY" pr_VR37}GoCCՕr7ɒJNHShSKDWfыm9 g:ǫv<STlq]x2,ʣ:y$Љt",7n~q BU Ѓ룫:iկS;z)lo#B|6*uϗyX6n%D4(\j~󳭯 t* GFuY9@'"rlm?̆T=?n.:ʗ[u16N7w'CNzt*+Trّ1'j "E5r3lEQ˛J8n j ѿ""͛lU|@u ]\[6*OUT))I>a@nS+T,*t&ypі-ˠW*[DD: ~7gڳml6>% t_6hk2Q6oFa?X Frk],bU{뎣^B;Sͦ-ru}n-b;fk#h8@'Ay]-"c`ݧTϛ}J4 t^!?Q "":Y?YغeP?mA奦CZwQ_ݖ-CDl{4OwKn}(V҉(pӣ&Yf!"e՝0κ! ShauOws_wϞ={~9=ź-[۴-m?OK?vz4 *z賴oNK37nO *>L{tv X1O0[DD : T+W&M$kիsp`zz!"~@7-T 'SepuڪyKv7YG ʉmپ\-t"O Y_XPPaM`!-f@DD7ssE!G[ښˬ# 1<]*zڵ|0l 7qh9*'.QJe8BW˜DDEm5rS puan-xkDDWeˆX5-'6j#s2Z+ȎG/Ftr`3?mh2.gX?\I:QrpND&jsoxn? n"<%HQ\!ݍ uK?yZ#tQ?ۃdz~:l)2Bl*IM@]m?? 
""Љhbp3,RSa̟?=Rk;y;Q_~ߞ3sםd@n\iqݵ:l#!!7ѺJPMܱ`S \4hmUU#;8@'mCcT< bܸqnᬳβ~k;""*vfj_n9!KZkk^kA' Xn6mKC-4$eO37`kvAKl{+-t"//8: "q3@W)*no馦Wǐqu7m]-'Ѕ Tz$U =DW9r '['Et*+22+sЉt"4ӓ,Y>O@σ#(B=Ӻ5~H=TquV}M>:JG&=^g lߺCn̘<tҙC~H|֭/AB4+9: MG\.N@?IyJ,m۾.:Q @+[GT:4&8 M ˸}N=]i< m{`/RKt7T -t""\ODɔK#k]$ӺJ\`ݐt<Ğ^DD%*tV["** T"TnЭKR{#O_x߼y)J IOWyFypJ:: ZK Fn555Elѣ9@q4@z8:!.N "d;3-Q`ZkmLJL:}[w?_)r{ u>F2Z ͉GWL:]iADD{=ۣ;w ܭz:JB Ѝ@_tn>~oF"7{{d[zf@ɗӮinrQ|,&Y7P59mV .Nn9: 7jわ'cGCFp Hl8F7NP(v7MmB9t :Ϻ[Q? nݐ/Pg@uф[DDNqNDCf.T#7,n`pNDT.}xغ3Ϩ&$ cP/ׯgAduC> :׺/33m;WtDDrѐ(| jn;P$n~}fݐ$cƌnjɒ%#=˺Ǿ}'н.+\xIDD4.lr9}u%ז)SNVA}N,6@$p{O*Z'39: y&q?zB޻(Fs uGduAN) DDT3mP]a2K(Db)P ӨQ j24%7}PO/]eADDG&:8h싨 b@D}݇T.^~>Y7Qth38bJY 7[gO0E܉luQ_|)n: IH# ybZG8ip~ND D4$Wpދ;yݶP$IeZ7p. j\  *i""*_W;qkƺ$Hƺ :(sN2\`ua_u: y3/!F$t nH ~ gUtlqDDT\ptܴΰn pYh. 4r^WUuu3iݑOEND䄓7Dl]co<s 8{cpًCY=ۺ[ɺ`S6<Rrw(ɌXwO ZGoGd:D$_+кMJ|j"" KnFmmNHHޛlV=+WoCkzDn ""eC nOԯ_ar)tp@!zuщԯoߩZwSՏ7o^ʺ $@k4o~6U>@n]"GמNBjC EZwt\4:E@TwY7PbQ-t"";Qabu+*Y'nH *#m n~*DDe(~c@I.袺ߺB:鄷 ƏeAt"BhqS~"r:"?t`UW]'֏2@W @/a*TSiO9]`q, <o 㧃zҨ_*Qr=s1UQ\ 8Ӻ!T<Ǝ|uQFJǬ;*ЉpND1rd騰ʑ#Gα`s;\ĊOV'G G8:#ʕ+uG /fADDT['H]li#On}v$(gp,yuQ]+jqvzuQ9 nN;N~kݐdѼ ~^^FÛ@ths֮Z7lNݜh cݐT";xS ZgGe4@XwSOb"r:ѠFU  =DǜDTn ;z;(7Y7 "YfuC1 Һ\*'Yw$լKfc:Rﴎ t&WYwfZЉt"*fP@͖ʺ!А;H G˗ﰮKillgADD%gNAp3@W b#zHن/S/=CҺX uQi'|*ޅNDT\QRmJxpNcc#Z^mPZb&댄Ty5!o8Lgٲe[[EVDDT\AuC$)Y6=ma~1H*[cݐt#kMruQnoZ~ú#==DDt"*~l#Ժ҂OÇ{Jγn)2"usF__ruWk-n嶁e,네/w?GQkuQ:(" 8@'RGR-vT͖º<kOdP ?.z՝J",n( ^ֺ~b&PuB>ŋiDDTNX7 G'LiQLq3@bup8@7?{~iчڊ/~:?6ruם.R<1(ok]bK c͜9Px8=xݼ<.E[XtuC>U\n0M2@uG>77;L_ZtuщlkЉ /D4B Q+а [wtKydϟ[w `uGGW\눡Zr~~ݺ"7 A  ӑA"gDf?:aHb{ X7IKcfnjxR!+֨4^Fet;3J D4nmں0ŻZgS,N(3է~# mѢECGQom* IDAT D.j\gCԴV_d]Q,!X7t# aD$XwjLG:?XwH ץjCrgEi5nADT \}8MD+eA]CPP7+K>iQ[#jغ(]3QDD3r řm0TPjb9`nN뎣htuPIOpMߴ~nopF3j&Ybxѿ:qNDElٲG]|r%KZw HQ^΅8k-zu`566NQC!=Jj aG;E?cADDazd(ɼ?;bO#Jm'NdQ P<`q4uZNBS_kN$?7ʻx̺ 9w!D_~<8eZ۬#$$"r۽#0YOsvDeiccc_dɈ\omr`ʕ][>~foY\N ݵ:c$a2Sڵ:Xuˈu`LyB3)-t&өB qU$8p h8@'Yb~iqц &Lj;npvf\^]W<߄buG_Eo "mUU3P|yuB!tiɺ7 wUU=k "[7K75Oyu@=8uu@?o^l0huG?jԪ[kGwۺZj:(8@' 6E}cѢE[׸qg7n}iuŝ;.\|;z/ZG իWDSlâğ]Ƚ}/pMzB;=uE!ݼ Xwⴑ#,p߼}E_ш(|x떁8J0w.(I'=Ej܅:X ЉBQQݾ|u1 *Par"-z5MySpE˭;zZb6bXrֺT?O~1DD5Ϝp*oGFVʖڪOb>7?KOܱq zS< Tp.qO[GWkmgºctVh6}O_[w @e-隻Lru Q[AYUo "J*Љs/\!46iX_ .~u BwsꟶV2-WJ-uu%w 1]s|ٟǺe14϶NKkz~XaVaf1Pm˴4C\v:->]x'}=Ú|7fws64}uߛ<۶mK'DWI!"rnϧ?ְu<>s$K]"$@PZE[bmk]ְXmOs_sNmK2ҽ{jmEP$H*$?? !`HfrM&Cs_h2'Ƈβ͝;1 _q̯7Θ^iy.)E> &0?#i[p\U5Wj3f\R5cX$M`eeev!BKƕnYX:v; nm \X6Bv0q'eX--+=Wy2ž.Ȋ?-9z MUƖ|Kqe.-cY _h} [tLx12ޥ_lݼ%|vѢEz @? Q_$:K'O+N-ttIY_UUUV]]Usr*֮ #d9tUH|xζnQ7Nri$אW9LץcG?dӽ yaل Ö+JebeCȶ+V?#)o\eٸ_4yr-J_MlvG >998qߢɣ(X2r0X{~MUظM:G/f5G܊cKXV7=,{f鲲ELrB\Ȏ 2tO^vN[Bx5K,9jw)&E﮶IڴO ǜYtтs@@NctŤk׭sUUU%)O^H/)LSԎ9px\Rxt|[UUk??_fy|';IUҨdB8 eo>}~SDu:y34}j{ߒil\*5ktL<'fJ,W򅥣GhY|7yzG顳K_2)#7 ^/sCSrV$0yB8&w³>&d{ұ_2ԏgeV{1ǒ:GԲq?hؼblұ_:~*w}\{?CBe2}6*<4t\}:GLw(|6Jm@7|Y{}.UW_cVU"IUL;cc8qs1dܮ6zIwy s)7<=+t2 +5tЪlW%sS>xТԛLwVFɡ+_|{\9c*8BgyMM-2²? 
" ?l mKǕNT:ǫhXɍѣO".sM'6, /<~Yf2ɏ%KroYr'<x'+_ar:{aT46Ҳ9$I/Wl۪ ^lOIʋ8+'lm-^}nwi\:[eee;ׂ l8X*++;PpK]_6ⷎy$ k~UՌ?;w4YSm&GdOy{曩Tjs69sq'20nߵ#/kwq2鋡6SdH$oqG{={֯_?.n_f~(Ёn˰佼ѕ0*ɮΝKǕ^EQ-/+=xZ_cn+\:cRiW1\n_Pe/encYȝQessNolWdp}M҈0&[3lsٳi᪒+:g#UӦM;ݲG.q~Nߓ?~@^1Fst.~| /tڵ/\ IV{Õs&Nz@ܥ9 }mI)zԨQw^]]\n5QfI*)螋=e_\:GG>zF4H:*t,{ɤG=n?Eњ.3kf:!ɜ XRsu1&+ 6n8z޼y~%z~@E'%wnY/id .i=O Wyʧø98扩B] 9̝+행&Z*t\[ZV8^l׭*XFvxSYdX+Zq= |]M.K,qc%%#[Ig2Pf@]KǕd V4I~ѢEC#eJ*ΝifY}ɣN3cs)S1.p%=}SCg2LZUgY2K4&\SS'UUU 5qR4]֮2{hk:a(wLWIj5֮{>S5}~_]0)*mrO:D>;OUUͬYܳMqU5}eIR͊랻 3Ky)(I'3ܻ%ӉG}r9CKm&Y_T,"wH$SRƕX*UmrnflG,!QNkbf@(%)#7+Џמ+7.-+\ 1as%va\:v|=+l9,3f"ͯ+$חb7[FPF$-Б]ȿfͨ8wo!CD:wZmz6/:VlPIgwj~e!խKǕ.Talkf"֡[_fe~ ZZVb-QkܴEQHGJ$HVzX2!W~y}yI˖+.BgLo]@ƯXҲ?{ޭl˿ )X裏*vzxڲW2hhx%K{k$L5,Z&{_=SS A5<\7Jnv?;$0R59`v/c}?iw__sS5? ȑ|/t| 9p . }Cn{57#t|˃e%VuLW3j##ʽޥK.;͊r_c+}f1)s:&΀br=Bqϟ?D:=R[֍ sK[Jf?tc"}uɣV`)$ӱu+0P+TCqgC"w}7m`6tTŦs}>t⮻f_%ik,ؗ2cƌC&eycWCUT:GZ/_]H܌}n56n`q@7z ʗ~!J Py>b}#t\Þ;l`oNxue$fbUVVvNwV% ty+:ǀ9uu?joYwpf^Hj35g??t` rա3 (w~(/Г KJ\rd:G!p-Vov wY|1''.Be>j 9黽$1iӦjE%Y=5$3Ru:vw2zn{;cc `N]ݝ&mg:>|[`q >r*olM qzO J&WEcsJџM?xdq%U IDAT\vbO2(|w߽Kqmr1$Lvƍ/14DoC9sno ֙_$eBgࢢ6f}lt9 OV4:FٹeW5$(}t {FVO5)9D3L^ѲRdz.t3dP KӤB芙,C!f-E פ=gnN5͚Tyެ,Q @(T*9]x5ݸϛ73^s~:H!_/D/ H"fS9k+ }vfhkțq[ږtMۻǟ$ξ fw1aA9{k\ȍ˷&tم cG>ף$>`d)[7n[9AfvxrD]OtϠQ'py(sg?b)e7ϩcISWsQyzG@f@wN0ĒfU45er }!K5L]ΤFz͇܎`ƯXLhȥBg)n0+C,7o9Q?0NKڴOgyC\vx\@[g:K0Վu mLn,ezhn-skE従J< Ϝ')tȤs^Hl̘1B(p$wESK] }RRC!Ċf^-?sSy}Н%aOo:jGn>7tB`1_{st.\6neaڴi7D٨8źI獜Ty[wYGu ]{SyĊF_;wɄ],㡳s.jgUWWǡà7K> /vCd7d IR'ϗYRѤ!!,ߞ[/{B/J[[,]#Y\yWKRʖ[9[* :K3֬5F?,iW<93 {2.a:8r NC6mZzĩ-\/i3 :=3lgZDߩY?2_,.;A;Tj<~Cks:@4{-ڷ3Xݢ,7|{,L[fJ~(UjS&]Kj }Mhl$VQs>: tp~N/piu,3 ĕO9\!dveOvue$S,X0˖=|=Zrk_(lٳgo҆Zk/Y(ȎT*tmΒ'^2EypvgB(\@rlAI @.U6>t UySB=:-',?gKI^rd? %^zye& 8~RԺdH&(Ycƌ9%wuMN;=RTTd|X=q:ajl@5o޼̜[Q,qǤGBAvͭ]`k$uX)>Bns/JF%qO:cS[[فOl"喲i+鲙dkLz)t&pf򦖅.u5-W' OpAQRڹBIOΒb}e o;w 7k뽒gmy-`~KMr#t'L>4t Е\cJ߮RRmqtNŊBG)$H))%L{OljDwuIIX[KRF 9]Jt~I8Gҏn-@,YQu.o]loػ_;=p9rH*)SVn\@AHnoȤ+y,yf)yRT*U;CP{_9uo: dΑh^<|qI'$*Qf%5x"=s:K}W;/p3rMM-)"@ONCekW4#TRS<6!t^*O'LzݒsJ._޺Nuͥ.~lI/%όㆧ.J^Jgq_ZF!/&w}fX&ThC4nbSܢTؿMzQIZ:KiHO9Ih. ^&}Lkfbg5#rչMKfZcGoM>9'M:ϱ},ޱhѢ"{$X/**> @@Ppk~'I#;CAKj.L!tqIK9sj×JVR":/Wܿ&ϔRoKj:0]'eW6ϵr}^Й%v5qeJ[[ }VtޘظamYag^Wtɯz:Or_q&<&YU____{š T*C^+S FT}GSԎa;TSu \+jmCS|^J}&J=:*_|{ʖ[ӻ2aL~zEcg&<Ё6=ծy5K.Iq=`be˭Q<]·\*V65k `cm_jgbAnX2vw};t,w%i7JZ#ŏ-WwqG1HS  Z}mڸR% ''\k]vӦΞSWw(mݶ3UW;_)i}zQ2zbc=ɴeꕯM,[\lڶ_.yNf撆G64}z]KW4E5;۽?8@]o֞Λe3 +5tЪlT:$EꖄΑ e'6ٵPoogMmmT8J1}.yMd˒>]]"=m=Q}uUͺ%e2r=NL/V4djљgY4(&6dJ:S/+g+Z2),-+ܮCrʖ %xڞTݙ^W|{kCG6ٍrN3h%F9k"2%0H8,{qRw(f~NF?:@P^ӧO\*ޤK/6}n]_BA5k֩%_fi 0uh> n8ݭ:ܺo cdo9B13ȇ+/fhc~C,դ +wz֝3,pqGt>"4tW^tUacۏ˛.-%%#EoWI' r=?yy,)+}6!tIR"KŊ?2ZXK[f7pL'>WYY9 0Pn6cƌ 2nכIg$Qʌ f͚ub: هmmI?J;6Ȁ~:Z 7MV2쓴IV` RP\\8+*8ƲOlWѥ~Αiz7kekև˃|iG{znzK |)#*ƮE^DeKKч%I!^L;Gޮ{K[[},{fiu]OXejk/(ZdKʗ/g 2wn{>*SeNE$趮|@.xM7\^_4%|=HoOxL@)a9sR"֛\fUnȌ'$=l?FvS4k֬qDZTf%JV*ipjIc5dS'J$- cƌ2(ƛc)JhFlWtliKdڕ ;2ӕ~ y)t:2hG,xR7$I{Q^ؕY7 9Q&1-+YdgKn"K͟VnMQQfQ/0:'Fvd2J%w;ƤA.WxlE8Xdt^g%,X0b aft6Yfu]ק KfdNDш>㴙m(**ҽ@/k->Np8D3^vdv&6u @KkdQCC%DGę(hߏ[R/l8u|0*#(:OP(:(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IR2t'$UH$i1t?åmX& C4tCg@A$i?OIZ%I#f!d0BP}Jzi]Kz\/$Z>C glIHz>4_RY~,9B _t(p@vJUgS(HݏyIT8 7$lfCl@>UO$8 bIIuBc.f@klm z#IߖY<-f2t- t=E~ Y%A7(6w7IwI`,AEfD SQjQw߻Aస;%͗tuϐt(w z @O. 
b}~ p8> s@D{Q p8UPI͒Ny3+NJݣiϿrDfJXDd2zuTefs}R tol#QG&;M9d(P"}f֑@.Q8$wOJjޞ+i׊ ':݇l r=+ yG&]td2ޑDoI/Px5 :23#vvQWld\@pP>R9G:S@?ՑƩޕ@Q8K*]mb$vD^MBP.͓ۘy#Qܻ'Y:COݕlfѻIS r@=)>#s($8V&,E@p0gg?\yttr@P8{]'*OR  tstOHy)= }'ҟ@a_wB`_w3f+ :D(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IH@@:( IDATD$ t$Q IH@@ ;Cڕc'Vm/#_qGɥg}_hh^nuI9L^ c &8Q-;}qoܬ[m;j"31\'=B'3ܝF{>qsj;xP|}}~UMXۭc/?Zopre׊ݿa?׽Y'<2Kڸuֽֿ6lުM۶kڼm:2vRL##Шc}S#&پ-#-cti[/U')4QޝFuyc F7%Q%kdYv9v*,$IuTMzfϙ=̙3KW%&Tũ$vlٱ$[]; cA#K$.;'_>@܋h4u9 (5o܇S7C8IEw ){&gPpn1$!ho_nœ0yKDrzƼUs5IEfpsp _Æִ6ux>OMu8L$~Elwc[^Wsp'{@e~EA>wݍm9hmis:0 gK$`c7Xp$ _hn{~@ZH;8ߚ~&v21O{ӎҀ:~/&"""ƧDD:EQpg\@~1 Ë޾pݺמ܃yҗH&UIFQ1Ʊ7Q`w'hkL*8{t 3 u^I{.EIΎf|Uϗɤ7WmϞ- )ެѯxd48T7}Sk'@/bN};4{\*FޱL^<#yWl??ysx/щ4ո^?7T4 `oW|ڈ_f)Zb47h9(ʚQl {GW.AD:Q*.*"8u J67sRщs;*>~ܡ57֮~~2|܃Wx ]M+6 /w.\GRmm{Nc1y#)<;빉M)~? tCEDzPv+'=׾ơH:m9VsWqn?>4]',S!"ῼ&. yjҐR.Dbq'<PϨID&RoVO$wO,y^cDtg߽u2e co 7 r~baZPY^&~t*c~C8X?kw)onR߸:hu,ۅh1NDD9{ʳ]Wql2ܔH48Uw.cwB2YdQ6 әR܉Dc_m =6 {F<RVU? jllOM5NZO$K ˆ} cI:ިףUZC5&k T*`Zia6O&_Dpn,"X hNIDI:Z>q&$̦̃h<׏nſ{b7*3jj5PƆz7ݍ. ׾3W1*pmo(7𹃻qG'# {gxn;FnGh/ؖ5:hC4T5 l1H]^n2!#OQ‡C62uV<Ͽ:%{&^ܻCJKi""88u)cljbwgkVs*]rTz3RU*> W7Fqot2sJ[m מr"=-y2fgj'~كY%f1ʁJ+oH̞KЌoxz.y#-փ$D`:䶌L/]QNTVlhicccm^Y/Zkk~e^+x'"""Z@'",,~СOY3H1ef|'a6:oɄ?(娀utE)QuZr13`L-ExY>VQ$ljkrY^=oZe$ ~rg[;֎V[W:f)u!*enh<hVh2lP?4hLZr#D:lhpτ#QOM",t Dnf>h-a)~|$Id7_| ux"\;Gf-ݎ*O5?k}#99?QvlhLSx*EEW0$^|`kof egoJ%O\vgοZޟ~3 K9VT`gb$$Ф?Q-jqAQ􎦮@봨UZ4TYRDŽ#QDDDDkDD$en)_.uN>s@=DtSxdgny{bTq@+cxlmm6KK$ HT#mnÿ/5sxU9b6[9}h,+F&(@A v5G fBIuV V G%kONqԌ umSEDDD:Iy şٳ ]Mu*EDrϠTϓa13–zcNQ%L=mr=Nb؛Xze&olS1ҒL*8!1Kܨ-ų 6KH4=CTkīǃ!M"U8Sgz2`<Zk4TC-cbۗqGv,X:-l:wtRhJDD$;=w[z7oߤRDTj>!֨PYOZk`4m Q>i5S{ߕA LW0nwIصemet:&wrNJfxt8ThN8&S'v648N}2B4ZvBRh"=t ip{!moo;lv1x"""R: {Mz_8GzF/M &+q`f*F]sڅ'I\gKC*.*+nlrgx\RJ̕ouI٘ppkzڞMRFAxgT$`lJdRI"ځ4%F2F$ڷzs~~5% 6=8jZkA8%f-G)v "" t""F߰1GvvYiU)"*5UآN yS<$. 
z :35#@OYO¸;"& =Vn2En̵>nZ&6z٘ tzUH:dcnQ2ϛ$7$TH4ZoZ7Vۅ*'H$YDGium͉w!"""Z@'""!I/,7d TZIWzU2Qj}ߘmܩ蔛Mؿy17Fn=ZĄZL*FRZ I%J7ͧmk;~nE&;B/uvWZ.[eL >-3AsA,&##Ƨ!G  t*DDDDkDDV,媇ڱ&A yRImmykאּ%=(e{I*>Ol;09TXw ٶڅK$?x俉ޣ>[ 5إ;&Љ(=Rs[ck :0qC[j}($Tvv4K32E&t頗T]\R1qwtRrb4+먗;=yDiN+Hi BQR׌{6 >}d#^U4\,D۫7ոoGPt4!Od7KkQ1NDD)nAnG'3`hrJj}G%h z(QhHhMWMxM;aseV+!%0 55UKAițҠYeh5*ӿj*#~7لj؊Ihzǯh4h,ㆤ\Ż@ t"""Zߘ@'"dgWookR)*Uir>y|u'`$rIDUKo<)~X\‚2LI'FkSMTVjӽVKPWWUB@7==j"8|ө/kr;KKv4O;{j\-3E+I\Xr^IZ:JpgX|v5t5թ aDsCuafArJeFjX`Zj5=jcScnx`4T_S׆y&*vMKrrI\C]D MgD |+ԦL&oJQe&8cprJ 75;'LHJDDfnB'5jT"S16,1"jM$bz4VSKl^2uwܽSc ,fα'"""ZO@'"U䪨Z%'*VR$٬ zIEY+R\NIңvmA^H&}E*NIfWU "!*N="3=OoW `haI⾳Ж"Q9+Sokk]93$bC㉈hcV5"AVEA %I 8kOHWZLΫ Qd7tH IDATV $g鶪N6^਴HuFYDTT >fѠ9 :GeC +Dc ~Kl_h{cMC> pz\7(﷬QhJDD"X $u3J8$$L[_0NE^!U( sKa/c\KDc]ޡj`NEմ8+UC[J=B,H&+ \nK,FŻ4E4=WpxWL~?n80/{](pg1H^¼6b\Ű/}z^ z.^t\F+K|+ԥ":~@,MZY"=-U= }X0NDD+ IyJ hӧ)$+ >]TFc" O!X̙]cwOjfh 5THV/FxmE}, !o&?V+ThNRDxDhX􍋵Qo^SbF6NЧ)BjoKQ!"V"VgDO$k#J\jTF#4 E|y4z.٭ o̸%a Nړ{>Ƽ䆀 6ZY x6 zX,F"\ 3)4Tۑj!?89dZ7{J+36)Vsa4xyV[#]"O>Cǔ)5 %aHLъB KR 0i6^.H G*NZF^*qEjz*>?vlhƫw"-6 pm!(h59v -u0mt3]6s KuZԻR]HfSo\N6ՈU76H_;s_z] /:#{]fVbdO$T?ϫw㉭'""Z@'"-IJd{SZqd^[$^'7&d‘N#~&SCa|'$:\Bߘ}xp5Lg86WƐ(kÈ/dR{E&pVXT=W%@iڷ@{]ll>0\w9bcqкjge%'7'Dgo 7k4|}'"""ZDD"JD>Р td DQ>ur8cO5?/4-omM'wK@.%ymP׆_9^ܻGvv~ʟf㱀tuZmMm?x"!%[<⿧b;*V,Ӿ}膆x"!6HnX"Q\;Ա&_=zT1NDD+>7XR.``(tB]#HNwNc@J c;/_rUcqkC)Vn Z eSR>u?9VA@x"b?bsKa5ksyZE7[ XxE‚,?wLlye&cF\M!F ,E/s c򜈈HъbqZ\#} IɤI*bU"HT'n|?-Y֭1n!\H i@R2b'*9# tŵ +#SU!uM*B7Җ%:>H ϙO,7};מGK 쉈hEIE.Or'}^'@/:&IɎ0Xo2“Zkj[HZv腿)*D CinĜ6K*igU%#"q'eK{H#;W c&ЩM jP( ~q*˸GpnqGa2!bKvᵁ֫jZPGh ޭ=ո?rMRQ0&X<hV輢D0HZ=Gx6 zDڳjm0uH* t[^_8yJADDDdDD"@'I J*UZVrPqR ģIUf2 O&Y,,e:y忽?y9*|iJ/.;]kSFL<SbӎQhLL7]i0N@Ny0sمc,Q6k9ϣhX@@QX&nrn?.dēqxu@BADDD%l9dQD }xqj'8ֆhT=F?3z{E9%</*$lbգ& Uf4T85.`,BG;!͒sE[A?ov@[BU}Br1SJ'I yp$co^|nt6:$""": {)S)*UFbh%2K͒z&#~t5\jmI?~|<|t f`ܠ'Y)FɟXE;_|,vL@Mm2 z6QzdR"D߄_hmknA 'safa }C<6ݵ0ԗVdXUJdZ/VCL2Ie)KvP?L-p۫ON!0;/}t6U2c1}nŵA b.DT#V c 47u8li9D1RtL@TVOs[`O֧,ZGvvݏp$p,p) O?CpnAo_h/ߙ֦-GDDJM_v͠IK&UXdJ6heԼddYr GˎoyeR.KT^&>[ drm*ZjU(k\ +Eڸ+41OOkDs ?7uڲ>_BhT@8MFجsq`s>_ylHa%'a"%"""Z;@'"UIg:\2!7Al.I&L8RqLY>)LF|ŧIإH.P!XMrTDl tZ4?[D`fҴo *ЁE>p{j[eI}.?qBɤozh^~_=?e乌lNDD+@[de)3cz~Qxb5J#JMv#H)= %03'.9?Ϡ@RQpDس{ I-5Ɏ-* εx\7> TgWlL(5gc`Ex6[r0'<_$&S_@۽99o$F L6']+vvVyBhSDcqUsU41NDD+LC*EB6b,H&INEK۬LF=JN-K* N\} SjX\eawZ%OC&?ov@ͬҊ2s#cL/Cީ]twdwGįE3g~P~IտX"!t̍ LZB<-܉hEr|f栨 .G\RЭze5VRȵt0^2:-!.?Ws+-R-H*Ð@wVZU44+ ?Z\&vpfT&[2l3_}B֨?.砋hq;yu!"""*mLъjrm#8B *ECQ!DHW6JQ>$I%gZf hGKU^& S1*1N =(X GU(x"HDIz-ZSn2Q%v8YML<ήj:}]06r|b&8dC+O' 'h=CjF߸Ox3z:Yi^'wN;}C ofpՑIE&^*\'^JH&k44llXޮvcn Bn KuqqcF,7\9>[}8p>!͂IƱ*IEh\Jcdi8}b * K?YDDDDk DD"Vj\JPrJ8nFpvؼ@~T"43cVmęͥ][aOCKTSE;L$ZAShk>ȣ-҇}WlMŶ.4Rbr<sϧIB:V[W{ >.$mk7j[%Z4OG-dV3$TQ( : }ڠWn3S@ [*Nf<"^Q SWuG/TU+WǮ D SR4mfQhgCk̗ޱҩOņbvSxb$!G""" t""ZU&~ğ~V!/Nj#F8baڸ{Wi!:g͌KT@4W)KA3aUT5I/rrI4IU|Bsم;d$"fN'7׮fV#չ" jJgDJ)%Г>AO&. wG&T  rM*-H} .&Vh}a_[[Uhe1@Ss7ՠl{TQdALL!0;/ŭ:T|j`2{] QzhM5;6d;hiwڅ0 |&go^K$x+@d4db^#QO=:}%6KrDgHyb[ TuZlkkYwG&Tt$%P0NR]c$E*(YAm#U|̙ܙ?"^ d;m*.z$𣳆djXdzn He%I%6TYPQfV1"v%36zlT2;0t}&}wfkq vlA2.?#w$@O#hb""""-܉(VVJnJiAu/텣8|mzXŗ$M.Ey@%+-*EZQH(ek  TOO$sѧ5 GC-UBxdէثPfJioQ? 6^|>IooWsM{[x48=Z(ZKDDDT@'"$TY)E. 
{RlX/r p$ tvZX°_&>] g!$ 3_r,eDtMpv>KVXt33wt-7qTRgUh9it:4Ilb7@'"7IWxqOX4il^.x٦Z"N,5]}r.rWY+j龳@l 6kT0/v"iZ3976LDkkY >D{D2; t@, &gb6!.0|ANLmu9_͆: V}]9 QiaRrVZ ْJ旊E,Fݑ "Y|h4(@exyznAxesm2Y{8N:f_S~ȌH0# WD`K*FiD=um2:*FDTDIihDbu:dLՔ@=m;^^emo8=CEDDDT@'"7I'8wOh&ɣfhvu{qJxmD$FOlj/dQ>AhN] ]_mQۍ@X<مxD6xJ}{f!^hLc!̍۩;mק%ס1NDDihoV#X|ApVZ(`jfcS_]Mu*F=]<#33%WRl- $']AU*F"gfzTQFw5PEݯN׾%l˖cyX=qglRh]Ɉ:/Y&#삿ODD$f-6*p$w.%thm{L2QrnJX32.U hVqmc6-ꟊu&ųAE`V U-e{?{6BWkt :Vϰ&Orj6z.'qM&ܖ矜NbD Nb<Ұ7?ݩ'%Zd QZ,fTh, r7I8+pHTt/ET(SYb6ڑT|}&N'"""Z@'""aGb(J IDAT"}L`vou[T޾Ij{o^nE[T%6{֟ݫR$N'軚Y?ܜʃݝYUF0F]4g*W$DZꍈ; oP||[m vvt JI=z$è?X<ꚖZuڷiѠO&0\jiWKsɈj*l9+,[%栳 =%ɤ{ """ t""Aw[q g4ֈτ_[I.?} hǹcC}#>~ =Uc3w.\:f)ѝ8E1=v7Qd7WOϬX"w$Q:" tf䋚iF9B煫%_han,\x\UִD489DDDDL>$kw$BS^v1\cf>z$ڷKʅzAF:6C^GDDDDkDD$YeS;*Ŗ5q"UlkoBD29ϩƩKR5yBjcʶFljDE'M[xo^*EA}h@K|S8+Y˅rzz H4DDD1NDD4 xdM?}\b*-.;vn; ~y2"qfϮ\Cf- -RDŽ#QD yE䵻Rٹ:mզ( |M\/}|ˎ +$=c^M(%⿾zFն <4*_سM;śWʍ۷N" c2[vet>&r x2^Yʷ&!,F"Bk[NhzJ276DDDN/R""**vk9/OJhoz6pUU/dqt58mֈf+ XWpƽ6Z|^Rڍ1 6O.v/H&qv~~ bqQyE?Qɶ2;"<.2:֨׫f>Wzj3< uո3(O%z䨴b6a!,qb`‹7Q*')DDT66z#@ɰk)ʼnqmxUA[n{%xg09=14&|9%@[ sYɈݏRsKa;ƉC6r"n=WPԻxǤ+& ݍ_"u\4.[=8[ZVM y8wW{L}h4|l* ,~yƇ*͗,N!$.($З]TQ_mGˎ:G&D?=A~zhI:c-72)ݦ‡_bgG3lCK usKa~r cdžf!}zg-3n`z^>9h\ etlCdO m/fvzsKaTUti48q{xv{ABEfX0&3'2K Vɰw ջ[-qgxw'LF XV̮-%C1a< 7UQ\7avTj6`#bn)LƿZ _8'ch81+@w;;dJf>$& M=?|Ye]\JV71QID}1dpNQ#(ڛfL+@'""ZC@'"k?|lD2%銢o܇ƜA: ՓIED GSS1‘ bj},,af!KmxvwwN_Xg3n.JN}E>X2^ܷGvt`4nFժN#@b[DjqjqjN; :]FlS/SAOUDh߸ 66q l([[^w[#E2Ov2ޫwad4=\sWRn6k:3z ϱԪmWyNW.2|`1Qa^y5PݙU2汲[Ӣ%S;*3mu(z9=jTG'LQ4?M_%!F{`A WBk<(TCӆotQ$:ƋOZphF|R"okI`vB~rOl];klWEpF<*+G<'ʁ:zc|WvxΪڨJXi?_Afzh.VZ #),#*FDDDDT@'"[—+C9TZZ ~xqߎWжg|i hv4 ^ܻl%^#;r1nF(ފ\>ÜUVE{]8{6^{v+D`AW᬴ʚ2,-{X<)Fՙτϕj{T~VQ^Ta:O|rIDDъFݏ/R8ovdG:V\&ڊC6BYmm2|뷎q=$ɼn-+wckK}^ηV9*,x|SljG{kq{xcXބwo^Sلo4|܋7]Smv2'7zƉ_Px}s/k \"ۇ}MngFrME՘JFnq7GUhDD^Cݝьӷzp fT?VAJZC~yՃW>bn=:-vw=R-kAwboW;:{ơ(**3qpk'Ū 9+,lEw[#:Ь·^~S87G_Tf;ښJRhp`K4toq$(р6]0yEf o/DeZ*KT+R@U ϗmO_mhqa'։؍> ""ՕMxvV\7 j4ۻظ _؉IU6lBBwX@aTXjUM% HEYZJ q}Y8@kw;;O_xf?N;S>ɜcg27sw#YQu=S$X?B.\[[Yswߟ|<ӧ216:DGå]oAݑΡ١94c?d7._GG&͵[~O9nJ~_)c #:pgZV׳0S?sܷwrogvɩ?6wojVVײƺt휞Hg8 !8QwѮ|F̟馣dj|59X0:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:ͦ0dJ3z_1E~z`H\]O QL DT0( t`7;RE5/+(Ё#I햷0(.RSczGpJIWAp{~v-w!#v)Џ@:S V)kJWb:p(ЁN(w#`tSwR~d$RʟZgH84h?/# )Ёw|%ɘPtq$/!)Ёw|:R2{?PJ pPYJ)؏F;HHڭ%A?&8Z,]UU$Iל[v5-n[tZys:#)ЁTU$_ٮDpJ)<5n?PP=j6ɫIN7CRʳMx@zRJMPx%@J)/&y I>[JYo:r;PH$i: CJRKM;ЁRJYI$o6 9p)ЁVJI4l){MCK)$LFYhT[gP;ЁZ:I>tF/R~t~r%'|7;2xx=ǕQ@j+lRI.6N$+t~r;WUUM&v$h8$_+ @$_OtPϯ|rAI TUUI>I>dD$?IB)M8 tTU5O%y"v $y_Rl$@UUչ$Iftp,ytyIDATxɀK˾ IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/tests/var/testserver.ova0000664000175000017500000005000000000000000021151 0ustar00zuulzuul00000000000000testserver.ovf0000644!00042560000003210712562113043014337 0ustar jjasekxintelall List of the virtual disks used in the package Logical networks used in the package Logical network used by this appliance. 
A virtual machine
The kind of installed guest operating system
Ubuntu_64 Ubuntu_64
Virtual hardware requirements for a virtual machine
Virtual Hardware Family 0 testserver virtualbox-2.2
1 virtual CPU Number of virtual CPUs 1 virtual CPU 1 3 1
MegaBytes 512 MB of memory Memory Size 512 MB of memory 2 4 512
0 ideController0 IDE Controller ideController0 3 PIIX4 5
1 ideController1 IDE Controller ideController1 4 PIIX4 5
0 sataController0 SATA Controller sataController0 5 AHCI 20
0 usb USB Controller usb 6 23
3 false sound Sound Card sound 7 ensoniq1371 35
0 true cdrom1 CD-ROM Drive cdrom1 8 4 15
0 disk2 Disk Image disk2 /disk/vmdisk2 9 5 17
true Ethernet adapter on 'NAT' NAT Ethernet adapter on 'NAT' 10 E1000 10
DMTF:x86:64 DMTF:x86:VT-d
Complete VirtualBox machine configuration in VirtualBox format
testserver-disk1.vmdk0000644!00042560000000000412562114301015504 0ustar jjasekxintelallABCD
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/glance/version.py0000664000175000017500000000133300000000000016346 0ustar00zuulzuul00000000000000
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pbr.version

version_info = pbr.version.VersionInfo('glance')
version_string = version_info.version_string
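The glance/version.py module above is deliberately small: it hard-codes no version string and lets pbr resolve it from the installed package's metadata at runtime. As a minimal illustrative sketch (not a file in this archive), assuming a glance 29.0.0 install whose metadata pbr can locate, the module would typically be consumed like this:

    from glance import version

    # version_string is the bound VersionInfo.version_string method, so
    # calling it returns the release string pbr reads from the package
    # metadata, e.g. "29.0.0" for this archive.
    print(version.version_string())

    # release_string() on the underlying VersionInfo object additionally
    # carries any development-release suffix.
    print(version.version_info.release_string())

This is the standard pbr pattern: the version is stamped once at packaging time (compare the Version field in the PKG-INFO member below) rather than duplicated in the source tree.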
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.8423011 glance-29.0.0/glance.egg-info/0000775000175000017500000000000000000000000016001 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/PKG-INFO0000664000175000017500000000645200000000000017105 0ustar00zuulzuul00000000000000
Metadata-Version: 1.2
Name: glance
Version: 29.0.0
Summary: OpenStack Image Service
Home-page: https://docs.openstack.org/glance/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ================
             OpenStack Glance
             ================

             Glance is an OpenStack project that provides services and
             associated libraries to store, browse, share, distribute and
             manage bootable disk images, other data closely associated with
             initializing compute resources, and metadata definitions.

             Use the following resources to learn more:

             API
             ---

             To learn how to use Glance's API, consult the documentation
             available online at:

             * `Image Service APIs `_

             Developers
             ----------

             For information on how to contribute to Glance, please see the
             contents of the CONTRIBUTING.rst in this repository.

             Any new code must follow the development guidelines detailed in
             the HACKING.rst file, and pass all unit tests.

             Further developer focused documentation is available at:

             * `Official Glance documentation `_
             * `Official Client documentation `_

             Operators
             ---------

             To learn how to deploy and configure OpenStack Glance, consult
             the documentation available online at:

             * `Openstack Glance `_

             In the unfortunate event that bugs are discovered, they should be
             reported to the appropriate bug tracker. You can raise bugs here:

             * `Bug Tracker `_

             Release notes
             -------------

             To learn more about Glance's new features, optimizations, and
             changes between versions, consult the release notes online at:

             * `Release Notes `__

             Other Information
             -----------------

             During each design summit, we agree on what the whole community
             wants to focus on for the upcoming release. You can see image
             service plans:

             * `Image Service Plans `_

             For more information about the Glance project please see:

             * `Glance Project `_

Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Python: >=3.8
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/SOURCES.txt0000664000175000017500000012111600000000000017667 0ustar00zuulzuul00000000000000
.coveragerc .mailmap .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/heading-level-guide.txt api-ref/source/index.rst api-ref/source/v2/cache-manage-parameters.yaml api-ref/source/v2/cache-manage.inc api-ref/source/v2/discovery-parameters.yaml api-ref/source/v2/discovery.inc api-ref/source/v2/images-data.inc api-ref/source/v2/images-images-v2.inc api-ref/source/v2/images-import.inc api-ref/source/v2/images-parameters-descriptions.inc api-ref/source/v2/images-parameters.yaml api-ref/source/v2/images-schemas.inc api-ref/source/v2/images-sharing-v2.inc api-ref/source/v2/images-tags.inc api-ref/source/v2/index.rst api-ref/source/v2/metadefs-index.rst api-ref/source/v2/metadefs-namespaces-objects.inc api-ref/source/v2/metadefs-namespaces-properties.inc api-ref/source/v2/metadefs-namespaces-tags.inc api-ref/source/v2/metadefs-namespaces.inc api-ref/source/v2/metadefs-parameters.yaml api-ref/source/v2/metadefs-resourcetypes.inc api-ref/source/v2/metadefs-schemas.inc api-ref/source/v2/stores.inc api-ref/source/v2/tasks-parameters.yaml api-ref/source/v2/tasks-schemas.inc api-ref/source/v2/tasks.inc api-ref/source/v2/samples/add-location-request.json api-ref/source/v2/samples/cache-list-response.json api-ref/source/v2/samples/image-create-request.json api-ref/source/v2/samples/image-create-response.json api-ref/source/v2/samples/image-details-deactivate-response.json api-ref/source/v2/samples/image-import-c-i-request.json api-ref/source/v2/samples/image-import-g-d-request.json api-ref/source/v2/samples/image-import-gd-request.json api-ref/source/v2/samples/image-import-w-d-request.json
api-ref/source/v2/samples/image-info-import-response.json api-ref/source/v2/samples/image-member-create-request.json api-ref/source/v2/samples/image-member-create-response.json api-ref/source/v2/samples/image-member-details-response.json api-ref/source/v2/samples/image-member-update-request.json api-ref/source/v2/samples/image-member-update-response.json api-ref/source/v2/samples/image-members-list-response.json api-ref/source/v2/samples/image-show-response.json api-ref/source/v2/samples/image-tasks-show-response.json api-ref/source/v2/samples/image-update-request.json api-ref/source/v2/samples/image-update-response.json api-ref/source/v2/samples/images-list-response.json api-ref/source/v2/samples/locations-list-detail-response.json api-ref/source/v2/samples/metadef-namespace-create-request-simple.json api-ref/source/v2/samples/metadef-namespace-create-request.json api-ref/source/v2/samples/metadef-namespace-create-response-simple.json api-ref/source/v2/samples/metadef-namespace-create-response.json api-ref/source/v2/samples/metadef-namespace-details-response.json api-ref/source/v2/samples/metadef-namespace-details-with-rt-response.json api-ref/source/v2/samples/metadef-namespace-update-request.json api-ref/source/v2/samples/metadef-namespace-update-response.json api-ref/source/v2/samples/metadef-namespaces-list-response.json api-ref/source/v2/samples/metadef-object-create-request.json api-ref/source/v2/samples/metadef-object-create-response.json api-ref/source/v2/samples/metadef-object-details-response.json api-ref/source/v2/samples/metadef-object-update-request.json api-ref/source/v2/samples/metadef-object-update-response.json api-ref/source/v2/samples/metadef-objects-list-response.json api-ref/source/v2/samples/metadef-properties-list-response.json api-ref/source/v2/samples/metadef-property-create-request.json api-ref/source/v2/samples/metadef-property-create-response.json api-ref/source/v2/samples/metadef-property-details-response.json api-ref/source/v2/samples/metadef-property-update-request.json api-ref/source/v2/samples/metadef-property-update-response.json api-ref/source/v2/samples/metadef-resource-type-assoc-create-response.json api-ref/source/v2/samples/metadef-resource-type-assoc-list-response.json api-ref/source/v2/samples/metadef-resource-type-create-request.json api-ref/source/v2/samples/metadef-resource-types-list-response.json api-ref/source/v2/samples/metadef-tag-create-response.json api-ref/source/v2/samples/metadef-tag-details-response.json api-ref/source/v2/samples/metadef-tag-update-request.json api-ref/source/v2/samples/metadef-tag-update-response.json api-ref/source/v2/samples/metadef-tags-create-request.json api-ref/source/v2/samples/metadef-tags-create-response.json api-ref/source/v2/samples/metadef-tags-list-response.json api-ref/source/v2/samples/schemas-image-member-show-response.json api-ref/source/v2/samples/schemas-image-members-list-response.json api-ref/source/v2/samples/schemas-image-show-response.json api-ref/source/v2/samples/schemas-images-list-response.json api-ref/source/v2/samples/schemas-metadef-namespace-show-response.json api-ref/source/v2/samples/schemas-metadef-namespaces-list-response.json api-ref/source/v2/samples/schemas-metadef-object-show-response.json api-ref/source/v2/samples/schemas-metadef-objects-list-response.json api-ref/source/v2/samples/schemas-metadef-properties-list-response.json api-ref/source/v2/samples/schemas-metadef-property-show-response.json 
api-ref/source/v2/samples/schemas-metadef-resource-type-association-show-response.json api-ref/source/v2/samples/schemas-metadef-resource-type-associations-list-response.json api-ref/source/v2/samples/schemas-metadef-tag-show-response.json api-ref/source/v2/samples/schemas-metadef-tags-list-response.json api-ref/source/v2/samples/schemas-task-show-response.json api-ref/source/v2/samples/schemas-tasks-list-response.json api-ref/source/v2/samples/stores-list-detail-response.json api-ref/source/v2/samples/stores-list-response.json api-ref/source/v2/samples/task-create-request.json api-ref/source/v2/samples/task-create-response.json api-ref/source/v2/samples/task-show-failure-response.json api-ref/source/v2/samples/task-show-processing-response.json api-ref/source/v2/samples/task-show-success-response.json api-ref/source/v2/samples/tasks-list-response.json api-ref/source/v2/samples/usage-response.json api-ref/source/versions/index.rst api-ref/source/versions/versions.inc api-ref/source/versions/samples/image-versions-response.json doc/requirements.txt doc/source/conf.py doc/source/deprecation-note.inc doc/source/index.rst doc/source/_extra/.htaccess doc/source/_static/.placeholder doc/source/admin/apache-httpd.rst doc/source/admin/authentication.rst doc/source/admin/cache.rst doc/source/admin/controllingservers.rst doc/source/admin/db-sqlalchemy-migrate.rst doc/source/admin/db.rst doc/source/admin/flows.rst doc/source/admin/index.rst doc/source/admin/interoperable-image-import.rst doc/source/admin/manage-images.rst doc/source/admin/multistores.rst doc/source/admin/new-location-apis.rst doc/source/admin/notifications.rst doc/source/admin/os_hash_algo.rst doc/source/admin/policies.rst doc/source/admin/property-protections.rst doc/source/admin/quotas.rst doc/source/admin/requirements.rst doc/source/admin/rollingupgrades.rst doc/source/admin/tasks.rst doc/source/admin/troubleshooting.rst doc/source/admin/useful-image-properties.rst doc/source/admin/zero-downtime-db-upgrade.rst doc/source/cli/footer.txt doc/source/cli/general_options.txt doc/source/cli/glanceapi.rst doc/source/cli/glancecachecleaner.rst doc/source/cli/glancecachemanage.rst doc/source/cli/glancecacheprefetcher.rst doc/source/cli/glancecachepruner.rst doc/source/cli/glancecontrol.rst doc/source/cli/glancemanage.rst doc/source/cli/glancereplicator.rst doc/source/cli/glancescrubber.rst doc/source/cli/glancestatus.rst doc/source/cli/header.txt doc/source/cli/index.rst doc/source/cli/openstack_options.txt doc/source/configuration/configuring.rst doc/source/configuration/glance_api.rst doc/source/configuration/glance_cache.rst doc/source/configuration/glance_manage.rst doc/source/configuration/glance_policy.rst doc/source/configuration/glance_scrubber.rst doc/source/configuration/index.rst doc/source/configuration/sample-configuration.rst doc/source/contributor/architecture.rst doc/source/contributor/blueprints.rst doc/source/contributor/contributing.rst doc/source/contributor/core_reviewer_guidelines.rst doc/source/contributor/database_architecture.rst doc/source/contributor/database_migrations.rst doc/source/contributor/documentation.rst doc/source/contributor/domain_implementation.rst doc/source/contributor/domain_model.rst doc/source/contributor/gerrit.rst doc/source/contributor/glance-groups.rst doc/source/contributor/index.rst doc/source/contributor/minor-code-changes.rst doc/source/contributor/refreshing-configs.rst doc/source/contributor/release-cpl.rst doc/source/contributor/release-notes.rst 
doc/source/contributor/releasecycle.rst doc/source/images/architecture.png doc/source/images/glance_db.png doc/source/images/glance_layers.png doc/source/images/image_status_transition.png doc/source/images/instance-life-1.png doc/source/images/instance-life-2.png doc/source/images/instance-life-3.png doc/source/images_src/architecture.graphml doc/source/images_src/glance_db.graphml doc/source/images_src/glance_layers.graphml doc/source/images_src/image_status_transition.dot doc/source/install/configure-quotas.rst doc/source/install/edit-glance-api-conf.rst doc/source/install/get-started.rst doc/source/install/index.rst doc/source/install/install-debian.rst doc/source/install/install-obs.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/install/note_configuration_vary_by_distribution.txt doc/source/install/register-quotas.rst doc/source/install/verify.rst doc/source/user/common-image-properties.rst doc/source/user/formats.rst doc/source/user/glanceapi.rst doc/source/user/glanceclient.rst doc/source/user/glancemetadefcatalogapi.rst doc/source/user/identifiers.rst doc/source/user/index.rst doc/source/user/metadefs-concepts.rst doc/source/user/os_hash_algo.rst doc/source/user/signature.rst doc/source/user/statuses.rst doc/test/redirect-tests.txt etc/glance-api-paste.ini etc/glance-api.conf etc/glance-cache.conf etc/glance-image-import.conf.sample etc/glance-manage.conf etc/glance-policy-generator.conf etc/glance-scrubber.conf etc/glance-swift.conf.sample etc/ovf-metadata.json.sample etc/property-protections-policies.conf.sample etc/property-protections-roles.conf.sample etc/schema-image.json etc/metadefs/README etc/metadefs/cim-processor-allocation-setting-data.json etc/metadefs/cim-resource-allocation-setting-data.json etc/metadefs/cim-storage-allocation-setting-data.json etc/metadefs/cim-virtual-system-setting-data.json etc/metadefs/compute-aggr-disk-filter.json etc/metadefs/compute-aggr-iops-filter.json etc/metadefs/compute-aggr-num-instances.json etc/metadefs/compute-cpu-mode.json etc/metadefs/compute-cpu-pinning.json etc/metadefs/compute-guest-memory-backing.json etc/metadefs/compute-guest-shutdown.json etc/metadefs/compute-host-capabilities.json etc/metadefs/compute-hypervisor.json etc/metadefs/compute-instance-data.json etc/metadefs/compute-libvirt-image.json etc/metadefs/compute-libvirt.json etc/metadefs/compute-quota.json etc/metadefs/compute-randomgen.json etc/metadefs/compute-vcputopology.json etc/metadefs/compute-vmware-flavor.json etc/metadefs/compute-vmware-quota-flavor.json etc/metadefs/compute-vmware.json etc/metadefs/compute-vtpm-hw.json etc/metadefs/compute-vtpm.json etc/metadefs/compute-watchdog.json etc/metadefs/compute-xenapi.json etc/metadefs/glance-common-image-props.json etc/metadefs/image-signature-verification.json etc/metadefs/operating-system.json etc/metadefs/software-databases.json etc/metadefs/software-runtimes.json etc/metadefs/software-webservers.json etc/metadefs/storage-volume-type.json etc/oslo-config-generator/glance-api.conf etc/oslo-config-generator/glance-cache.conf etc/oslo-config-generator/glance-image-import.conf etc/oslo-config-generator/glance-manage.conf etc/oslo-config-generator/glance-scrubber.conf glance/__init__.py glance/context.py glance/gateway.py glance/housekeeping.py glance/i18n.py glance/location.py glance/notifier.py glance/opts.py glance/schema.py glance/scrubber.py glance/sqlite_migration.py glance/version.py glance.egg-info/PKG-INFO glance.egg-info/SOURCES.txt 
glance.egg-info/dependency_links.txt glance.egg-info/entry_points.txt glance.egg-info/not-zip-safe glance.egg-info/pbr.json glance.egg-info/requires.txt glance.egg-info/top_level.txt glance/api/__init__.py glance/api/common.py glance/api/policy.py glance/api/property_protections.py glance/api/versions.py glance/api/middleware/__init__.py glance/api/middleware/cache.py glance/api/middleware/cache_manage.py glance/api/middleware/context.py glance/api/middleware/gzip.py glance/api/middleware/version_negotiation.py glance/api/v1/__init__.py glance/api/v1/router.py glance/api/v2/__init__.py glance/api/v2/cached_images.py glance/api/v2/discovery.py glance/api/v2/image_actions.py glance/api/v2/image_data.py glance/api/v2/image_members.py glance/api/v2/image_tags.py glance/api/v2/images.py glance/api/v2/metadef_namespaces.py glance/api/v2/metadef_objects.py glance/api/v2/metadef_properties.py glance/api/v2/metadef_resource_types.py glance/api/v2/metadef_tags.py glance/api/v2/policy.py glance/api/v2/router.py glance/api/v2/schemas.py glance/api/v2/tasks.py glance/api/v2/model/__init__.py glance/api/v2/model/metadef_namespace.py glance/api/v2/model/metadef_object.py glance/api/v2/model/metadef_property_item_type.py glance/api/v2/model/metadef_property_type.py glance/api/v2/model/metadef_resource_type.py glance/api/v2/model/metadef_tag.py glance/async_/__init__.py glance/async_/taskflow_executor.py glance/async_/utils.py glance/async_/flows/__init__.py glance/async_/flows/api_image_import.py glance/async_/flows/base_import.py glance/async_/flows/convert.py glance/async_/flows/introspect.py glance/async_/flows/location_import.py glance/async_/flows/ovf_process.py glance/async_/flows/_internal_plugins/__init__.py glance/async_/flows/_internal_plugins/base_download.py glance/async_/flows/_internal_plugins/copy_image.py glance/async_/flows/_internal_plugins/glance_download.py glance/async_/flows/_internal_plugins/web_download.py glance/async_/flows/plugins/__init__.py glance/async_/flows/plugins/image_conversion.py glance/async_/flows/plugins/image_decompression.py glance/async_/flows/plugins/inject_image_metadata.py glance/async_/flows/plugins/no_op.py glance/async_/flows/plugins/plugin_opts.py glance/cmd/__init__.py glance/cmd/api.py glance/cmd/cache_cleaner.py glance/cmd/cache_manage.py glance/cmd/cache_prefetcher.py glance/cmd/cache_pruner.py glance/cmd/control.py glance/cmd/manage.py glance/cmd/replicator.py glance/cmd/scrubber.py glance/cmd/status.py glance/common/__init__.py glance/common/auth.py glance/common/client.py glance/common/config.py glance/common/crypt.py glance/common/exception.py glance/common/format_inspector.py glance/common/property_utils.py glance/common/removed_config.py glance/common/store_utils.py glance/common/swift_store_utils.py glance/common/timeutils.py glance/common/trust_auth.py glance/common/utils.py glance/common/wsgi.py glance/common/wsgi_app.py glance/common/wsme_utils.py glance/common/scripts/__init__.py glance/common/scripts/utils.py glance/common/scripts/api_image_import/__init__.py glance/common/scripts/api_image_import/main.py glance/common/scripts/image_import/__init__.py glance/common/scripts/image_import/main.py glance/db/__init__.py glance/db/metadata.py glance/db/migration.py glance/db/utils.py glance/db/simple/__init__.py glance/db/simple/api.py glance/db/sqlalchemy/__init__.py glance/db/sqlalchemy/api.py glance/db/sqlalchemy/metadata.py glance/db/sqlalchemy/models.py glance/db/sqlalchemy/models_metadef.py glance/db/sqlalchemy/schema.py 
glance/db/sqlalchemy/alembic_migrations/README glance/db/sqlalchemy/alembic_migrations/__init__.py glance/db/sqlalchemy/alembic_migrations/add_artifacts_tables.py glance/db/sqlalchemy/alembic_migrations/add_images_tables.py glance/db/sqlalchemy/alembic_migrations/add_metadefs_tables.py glance/db/sqlalchemy/alembic_migrations/add_tasks_tables.py glance/db/sqlalchemy/alembic_migrations/alembic.ini glance/db/sqlalchemy/alembic_migrations/env.py glance/db/sqlalchemy/alembic_migrations/migrate.cfg glance/db/sqlalchemy/alembic_migrations/script.py.mako glance/db/sqlalchemy/alembic_migrations/data_migrations/2023_1_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/2024_1_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/__init__.py glance/db/sqlalchemy/alembic_migrations/data_migrations/ocata_migrate01_community_images.py glance/db/sqlalchemy/alembic_migrations/data_migrations/pike_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/queens_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/rocky_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/rocky_migrate02_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/train_migrate01_backend_to_store.py glance/db/sqlalchemy/alembic_migrations/data_migrations/ussuri_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/wallaby_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/xena_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/yoga_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/data_migrations/zed_migrate01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/2023_1_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/2023_1_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/2024_1_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/2024_1_expand01_add_cache_tables.py glance/db/sqlalchemy/alembic_migrations/versions/__init__.py glance/db/sqlalchemy/alembic_migrations/versions/liberty_initial.py glance/db/sqlalchemy/alembic_migrations/versions/mitaka01_add_image_created_updated_idx.py glance/db/sqlalchemy/alembic_migrations/versions/mitaka02_update_metadef_os_nova_server.py glance/db/sqlalchemy/alembic_migrations/versions/ocata_contract01_drop_is_public.py glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py glance/db/sqlalchemy/alembic_migrations/versions/pike_contract01_drop_artifacts_tables.py glance/db/sqlalchemy/alembic_migrations/versions/pike_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/queens_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/queens_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/rocky_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/rocky_contract02_empty.py glance/db/sqlalchemy/alembic_migrations/versions/rocky_expand01_add_os_hidden.py glance/db/sqlalchemy/alembic_migrations/versions/rocky_expand02_add_os_hash_.py glance/db/sqlalchemy/alembic_migrations/versions/train_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/train_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/ussuri_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/ussuri_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/wallaby_contract01_empty.py 
glance/db/sqlalchemy/alembic_migrations/versions/wallaby_expand01_add_user_imageid_requestid_to_tasks.py glance/db/sqlalchemy/alembic_migrations/versions/xena_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/xena_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/yoga_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/yoga_expand01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/zed_contract01_empty.py glance/db/sqlalchemy/alembic_migrations/versions/zed_expand01_empty.py glance/db/sqlalchemy/metadef_api/__init__.py glance/db/sqlalchemy/metadef_api/namespace.py glance/db/sqlalchemy/metadef_api/object.py glance/db/sqlalchemy/metadef_api/property.py glance/db/sqlalchemy/metadef_api/resource_type.py glance/db/sqlalchemy/metadef_api/resource_type_association.py glance/db/sqlalchemy/metadef_api/tag.py glance/db/sqlalchemy/metadef_api/utils.py glance/domain/__init__.py glance/domain/proxy.py glance/hacking/__init__.py glance/hacking/checks.py glance/image_cache/__init__.py glance/image_cache/base.py glance/image_cache/cleaner.py glance/image_cache/client.py glance/image_cache/prefetcher.py glance/image_cache/pruner.py glance/image_cache/drivers/__init__.py glance/image_cache/drivers/base.py glance/image_cache/drivers/centralized_db.py glance/image_cache/drivers/common.py glance/image_cache/drivers/sqlite.py glance/image_cache/drivers/xattr.py glance/locale/de/LC_MESSAGES/glance.po glance/locale/en_GB/LC_MESSAGES/glance.po glance/locale/es/LC_MESSAGES/glance.po glance/locale/fr/LC_MESSAGES/glance.po glance/locale/it/LC_MESSAGES/glance.po glance/locale/ja/LC_MESSAGES/glance.po glance/locale/ko_KR/LC_MESSAGES/glance.po glance/locale/pt_BR/LC_MESSAGES/glance.po glance/locale/ru/LC_MESSAGES/glance.po glance/locale/tr_TR/LC_MESSAGES/glance.po glance/locale/zh_CN/LC_MESSAGES/glance.po glance/locale/zh_TW/LC_MESSAGES/glance.po glance/policies/__init__.py glance/policies/base.py glance/policies/cache.py glance/policies/discovery.py glance/policies/image.py glance/policies/metadef.py glance/policies/tasks.py glance/quota/__init__.py glance/quota/keystone.py glance/tests/__init__.py glance/tests/stubs.py glance/tests/test_hacking.py glance/tests/utils.py glance/tests/etc/glance-swift.conf glance/tests/etc/policy.yaml glance/tests/etc/property-protections-policies.conf glance/tests/etc/property-protections.conf glance/tests/etc/schema-image.json glance/tests/functional/__init__.py glance/tests/functional/ft_utils.py glance/tests/functional/store_utils.py glance/tests/functional/test_api.py glance/tests/functional/test_cache_middleware.py glance/tests/functional/test_client_exceptions.py glance/tests/functional/test_client_redirects.py glance/tests/functional/test_cors_middleware.py glance/tests/functional/test_glance_manage.py glance/tests/functional/test_gzip_middleware.py glance/tests/functional/test_healthcheck_middleware.py glance/tests/functional/test_logging.py glance/tests/functional/test_reload.py glance/tests/functional/test_sqlite.py glance/tests/functional/test_wsgi.py glance/tests/functional/db/__init__.py glance/tests/functional/db/base.py glance/tests/functional/db/base_metadef.py glance/tests/functional/db/test_migrations.py glance/tests/functional/db/test_sqlalchemy.py glance/tests/functional/db/migrations/__init__.py glance/tests/functional/db/migrations/test_2024_1_expand01.py glance/tests/functional/db/migrations/test_mitaka01.py glance/tests/functional/db/migrations/test_mitaka02.py 
glance/tests/functional/db/migrations/test_ocata_contract01.py glance/tests/functional/db/migrations/test_ocata_expand01.py glance/tests/functional/db/migrations/test_ocata_migrate01.py glance/tests/functional/db/migrations/test_pike_contract01.py glance/tests/functional/db/migrations/test_pike_expand01.py glance/tests/functional/db/migrations/test_pike_migrate01.py glance/tests/functional/db/migrations/test_rocky_expand01.py glance/tests/functional/db/migrations/test_rocky_expand02.py glance/tests/functional/db/migrations/test_train_migrate01.py glance/tests/functional/db/migrations/test_wallaby_expand01.py glance/tests/functional/image_cache/__init__.py glance/tests/functional/image_cache/drivers/__init__.py glance/tests/functional/image_cache/drivers/test_centralized_db.py glance/tests/functional/serial/__init__.py glance/tests/functional/serial/test_scrubber.py glance/tests/functional/v2/__init__.py glance/tests/functional/v2/metadef_base.py glance/tests/functional/v2/test_cache_api.py glance/tests/functional/v2/test_cache_api_policy.py glance/tests/functional/v2/test_discovery.py glance/tests/functional/v2/test_images.py glance/tests/functional/v2/test_images_api_policy.py glance/tests/functional/v2/test_images_import_locking.py glance/tests/functional/v2/test_legacy_update_cinder_store.py glance/tests/functional/v2/test_member_api_policy.py glance/tests/functional/v2/test_metadef_namespace_api_policy.py glance/tests/functional/v2/test_metadef_namespaces.py glance/tests/functional/v2/test_metadef_object_api_policy.py glance/tests/functional/v2/test_metadef_objects.py glance/tests/functional/v2/test_metadef_properties.py glance/tests/functional/v2/test_metadef_property_api_policy.py glance/tests/functional/v2/test_metadef_resourcetype_api_policy.py glance/tests/functional/v2/test_metadef_resourcetypes.py glance/tests/functional/v2/test_metadef_tag_api_policy.py glance/tests/functional/v2/test_metadef_tags.py glance/tests/functional/v2/test_schemas.py glance/tests/functional/v2/test_tasks.py glance/tests/functional/v2/test_tasks_api_policy.py glance/tests/integration/__init__.py glance/tests/integration/v2/__init__.py glance/tests/integration/v2/base.py glance/tests/integration/v2/test_property_quota_violations.py glance/tests/integration/v2/test_tasks_api.py glance/tests/unit/__init__.py glance/tests/unit/base.py glance/tests/unit/fake_rados.py glance/tests/unit/fixtures.py glance/tests/unit/test_auth.py glance/tests/unit/test_cache_manage.py glance/tests/unit/test_cache_middleware.py glance/tests/unit/test_cached_images.py glance/tests/unit/test_context.py glance/tests/unit/test_context_middleware.py glance/tests/unit/test_data_migration_framework.py glance/tests/unit/test_db.py glance/tests/unit/test_db_metadef.py glance/tests/unit/test_domain.py glance/tests/unit/test_domain_proxy.py glance/tests/unit/test_gateway.py glance/tests/unit/test_glance_manage.py glance/tests/unit/test_glance_replicator.py glance/tests/unit/test_housekeeping.py glance/tests/unit/test_image_cache.py glance/tests/unit/test_manage.py glance/tests/unit/test_misc.py glance/tests/unit/test_notifier.py glance/tests/unit/test_policy.py glance/tests/unit/test_quota.py glance/tests/unit/test_schema.py glance/tests/unit/test_scrubber.py glance/tests/unit/test_sqlite_migration.py glance/tests/unit/test_store_image.py glance/tests/unit/test_store_location.py glance/tests/unit/test_test_utils.py glance/tests/unit/test_versions.py glance/tests/unit/utils.py glance/tests/unit/api/__init__.py 
glance/tests/unit/api/test_cmd.py glance/tests/unit/api/test_common.py glance/tests/unit/api/test_property_protections.py glance/tests/unit/api/middleware/__init__.py glance/tests/unit/api/middleware/test_cache_manage.py glance/tests/unit/async_/__init__.py glance/tests/unit/async_/test_async.py glance/tests/unit/async_/test_taskflow_executor.py glance/tests/unit/async_/test_utils.py glance/tests/unit/async_/flows/__init__.py glance/tests/unit/async_/flows/test_api_image_import.py glance/tests/unit/async_/flows/test_base_download.py glance/tests/unit/async_/flows/test_convert.py glance/tests/unit/async_/flows/test_copy_image.py glance/tests/unit/async_/flows/test_glance_download.py glance/tests/unit/async_/flows/test_import.py glance/tests/unit/async_/flows/test_introspect.py glance/tests/unit/async_/flows/test_location_import.py glance/tests/unit/async_/flows/test_ovf_process.py glance/tests/unit/async_/flows/test_web_download.py glance/tests/unit/async_/flows/plugins/__init__.py glance/tests/unit/async_/flows/plugins/test_image_conversion.py glance/tests/unit/async_/flows/plugins/test_inject_image_metadata.py glance/tests/unit/cmd/__init__.py glance/tests/unit/cmd/test_status.py glance/tests/unit/common/__init__.py glance/tests/unit/common/test_client.py glance/tests/unit/common/test_config.py glance/tests/unit/common/test_exception.py glance/tests/unit/common/test_format_inspector.py glance/tests/unit/common/test_property_utils.py glance/tests/unit/common/test_scripts.py glance/tests/unit/common/test_swift_store_utils.py glance/tests/unit/common/test_timeutils.py glance/tests/unit/common/test_utils.py glance/tests/unit/common/test_wsgi.py glance/tests/unit/common/test_wsgi_app.py glance/tests/unit/common/scripts/__init__.py glance/tests/unit/common/scripts/test_scripts_utils.py glance/tests/unit/common/scripts/image_import/__init__.py glance/tests/unit/common/scripts/image_import/test_main.py glance/tests/unit/image_cache/__init__.py glance/tests/unit/image_cache/drivers/__init__.py glance/tests/unit/image_cache/drivers/test_sqlite.py glance/tests/unit/keymgr/__init__.py glance/tests/unit/keymgr/fake.py glance/tests/unit/v2/__init__.py glance/tests/unit/v2/test_cache_management_api.py glance/tests/unit/v2/test_discovery_image_import.py glance/tests/unit/v2/test_discovery_stores.py glance/tests/unit/v2/test_image_actions_resource.py glance/tests/unit/v2/test_image_data_resource.py glance/tests/unit/v2/test_image_members_resource.py glance/tests/unit/v2/test_image_tags_resource.py glance/tests/unit/v2/test_images_resource.py glance/tests/unit/v2/test_metadef_resources.py glance/tests/unit/v2/test_schemas_resource.py glance/tests/unit/v2/test_tasks_resource.py glance/tests/unit/v2/test_v2_policy.py glance/tests/var/ca.crt glance/tests/var/ca.key glance/tests/var/certificate.crt glance/tests/var/privatekey.key glance/tests/var/testserver-bad-ovf.ova glance/tests/var/testserver-no-disk.ova glance/tests/var/testserver-no-ovf.ova glance/tests/var/testserver-not-tar.ova glance/tests/var/testserver.ova httpd/README httpd/glance-api-uwsgi.ini httpd/uwsgi-glance-api.conf playbooks/enable-fips.yaml playbooks/post-check-metadata-injection.yaml rally-jobs/README.rst rally-jobs/glance.yaml rally-jobs/extra/README.rst rally-jobs/extra/fake.img rally-jobs/plugins/README.rst releasenotes/notes/.placeholder releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml releasenotes/notes/Stein-reno-rc1-0a03f8394934a2e7.yaml releasenotes/notes/Train-milestone3-be5520106a182fa0.yaml 
releasenotes/notes/add-all-visibility-image-filter-ea2f3948ff778fe3.yaml releasenotes/notes/add-cli-and-cache-opts-902f28d65c8fb827.yaml releasenotes/notes/add-compressed-format-185e537187a202bd.yaml releasenotes/notes/add-cpu-thread-pinning-metadata-09b1866b875c4647.yaml releasenotes/notes/add-description-common-image-property-95ab1139d41579d2.yaml releasenotes/notes/add-glance-download-method-be6d9e927b8b0a43.yaml releasenotes/notes/add-new-add-location-api-acd459299976b4a5.yaml releasenotes/notes/add-new-get-locations-api-83c4b6dc077efc5f.yaml releasenotes/notes/add-ploop-format-fdd583849504ab15.yaml releasenotes/notes/add-processlimits-to-qemu-img-c215f5d90f741d8a.yaml releasenotes/notes/add-vhdx-format-2be99354ad320cca.yaml releasenotes/notes/add_capability_to_purge_all_deleted_rows-7b3b9b767669b1a5.yaml releasenotes/notes/add_policy_enforcement_for_metadef_delete_apis-95d2b16cc444840a.yaml releasenotes/notes/added-quota-usage-api-f1914054132f2021.yaml releasenotes/notes/added-store-detail-api-215810aa85dfbb99.yaml releasenotes/notes/alembic-migrations-902b31edae7a5d7d.yaml releasenotes/notes/antelope-milestone-2-d89e39412f9c0334.yaml releasenotes/notes/antelope-milestone-3-b9a4f7fdba31f628.yaml releasenotes/notes/api-2-6-current-9eeb83b7ecc0a562.yaml releasenotes/notes/api-2.16-8417b1e23322fedb.yaml releasenotes/notes/api-minor-ver-bump-2-6-aa3591fc58f08055.yaml releasenotes/notes/api-minor-version-bump-bbd69dc457fc731c.yaml releasenotes/notes/bobcat-milestone-1-releasenotes-2d109105530877d6.yaml releasenotes/notes/bobcat-milestone-2-releasenotes-085084b03f66d671.yaml releasenotes/notes/bp-barbican-secret-deletion-support-40cffa5ffa33447e.yaml releasenotes/notes/bp-inject-image-metadata-0a08af539bcce7f2.yaml releasenotes/notes/bp-mitigate-ossn-0075-c0e74e60d86d8ea2.yaml releasenotes/notes/bp-upgrade-checks-b3272c3ddb4e8cf7.yaml releasenotes/notes/bp-virtio-packed-ring-configuration-support-0cd0333c1c52c02b.yaml releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml releasenotes/notes/bug-1593177-8ef35458d29ec93c.yaml releasenotes/notes/bug-1719252-name-validation-443a2e2a36be2cec.yaml releasenotes/notes/bug-1861334-ebc2026b85675d47.yaml releasenotes/notes/bug-1881958-d0e16538f3c0ffaa.yaml releasenotes/notes/bug-1979699-70182ec2aead0383.yaml releasenotes/notes/bug-1980049-623d2eb0fa074136.yaml releasenotes/notes/bug-2059809-disallow-qcow2-datafile-5d5ff4dbd590c911.yaml releasenotes/notes/bump-api-2-4-efa266aef0928e04.yaml releasenotes/notes/cache-api-b806ccfb8c5d9bb6.yaml releasenotes/notes/caracal-milestone-3-releasenotes-534b1daa3e1f254c.yaml releasenotes/notes/cinder-store-migration-non-owner-80a2a8114d8602aa.yaml releasenotes/notes/clean-up-acceptable-values-store_type_preference-39081e4045894731.yaml releasenotes/notes/cleanout_registry_data-api-9d91368aed83497e.yaml releasenotes/notes/cleanup-enable_v2_api-9b9b467f4ae8c3b1.yaml releasenotes/notes/consistent-store-names-57374b9505d530d0.yaml releasenotes/notes/copy-existing-image-94fd0b8d24bc16a0.yaml releasenotes/notes/dalmatian-metadef-changes-272f78e019a15ff1.yaml releasenotes/notes/dalmatian-milestone-1-releasenotes-45150bc42aead80d.yaml releasenotes/notes/dalmatian-milestone-2-releasenotes-a35ccfe1cbf95fce.yaml releasenotes/notes/dalmatian-milestone-3-76c9712862b6c889.yaml releasenotes/notes/delete_from_store-a1d9b9bd5cf27546.yaml releasenotes/notes/deprecate-admin_role-2f9d33ed0785d082.yaml releasenotes/notes/deprecate-allow_additional_image_props-0e3b2f1ffa4e55e1.yaml 
releasenotes/notes/deprecate-checksum-a602853403e1c4a8.yaml releasenotes/notes/deprecate-digest_algorithm-7cab4ef4240c522f.yaml releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml releasenotes/notes/deprecate-glance-cache-manage-c88f07d33fcc7ca5.yaml releasenotes/notes/deprecate-json-formatted-policy-file-5cb692fe889eb52b.yaml releasenotes/notes/deprecate-location_strategy-f658e69700204bbf.yaml releasenotes/notes/deprecate-owner_is_tenant-ec8ea36a3f7e9268.yaml releasenotes/notes/deprecate-registry-ff286df90df793f0.yaml releasenotes/notes/deprecate-scrubber-862c38e0d65557f3.yaml releasenotes/notes/deprecate-show-multiple-location-9890a1e961def2f6.yaml releasenotes/notes/deprecate-show-multiple-location-continued-646f91b21cd771f7.yaml releasenotes/notes/deprecate-show-multiple-location-continued-ussuri-16e8d9d8a59da1bc.yaml releasenotes/notes/deprecate-sqlite-cache-driver-1f5f67862f56e0ba.yaml releasenotes/notes/deprecate-v1-api-6c7dbefb90fd8772.yaml releasenotes/notes/deprecate-windows-support-557481e4d45912ee.yaml releasenotes/notes/deprecate_metadata_encryption_key_option-8c6076ca6e361f92.yaml releasenotes/notes/distributed-image-import-82cff4426731beac.yaml releasenotes/notes/do-not-load-paste-ini-1ec473693037ee5b.yaml releasenotes/notes/drop-py-2-7-863871c7bc047146.yaml releasenotes/notes/drop-python-3-6-and-3-7-c6f051d5b2b40329.yaml releasenotes/notes/drop-sheepdog-b55aae84807d31d9.yaml releasenotes/notes/drop-support-for-sqlalchemy-migrate-4bcbe7b200697586.yaml releasenotes/notes/enable-enforce-scope-and-new-defaults-ef543183e6c2eabb.yaml releasenotes/notes/exp-emc-mig-fix-a7e28d547ac38f9e.yaml releasenotes/notes/expanding-stores-details-d3aa8ebb76ad68d9.yaml releasenotes/notes/experimental-multi-store-d2c26f9dbb9c835b.yaml releasenotes/notes/fix-md-tag-create-multiple-c04756cf5155983d.yaml releasenotes/notes/fix-set-acls-bc17b5e125425c9b.yaml releasenotes/notes/fix_1889640-95d543629d7dadce.yaml releasenotes/notes/fix_1889676-f8d302fd240c8a57.yaml releasenotes/notes/fix_httpd_docs-3efff0395f96a94d.yaml releasenotes/notes/glance-unified-quotas-fba62fabb00379af.yaml releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml releasenotes/notes/image-conversion-plugin-5aee45e1a1a5bb2b.yaml releasenotes/notes/image-not-found-policy-override-removed-52616c483a270bcf.yaml releasenotes/notes/image-tasks-api-f21b42eab91c2079.yaml releasenotes/notes/image-visibility-changes-fa5aa18dc67244c4.yaml releasenotes/notes/image_decompression_plugin-5f085666aae01f29.yaml releasenotes/notes/immediate-caching-image-e38055575c361d32.yaml releasenotes/notes/implement-lite-spec-db-sync-check-3e2e147aec0ae82b.yaml releasenotes/notes/import-locking-behavior-901c691f3839fe0a.yaml releasenotes/notes/import-multi-stores-3e781f2878b3134d.yaml releasenotes/notes/improved-config-options-221c58a8c37602ba.yaml releasenotes/notes/location-add-status-checks-b70db66100bc96b7.yaml releasenotes/notes/lock_path_config_option-2771feaa649e4563.yaml releasenotes/notes/make-cinder-url-compatible-with-locations-1f3e938ff7e11c7d.yaml releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml releasenotes/notes/metadef-api-admin-operations-b9a2d863913b0cae.yaml releasenotes/notes/multihash-081466a98601da20.yaml releasenotes/notes/mutistore-support-for-scrubber-6b360394ef32774a.yaml releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml releasenotes/notes/newton-1-release-065334d464f78fc5.yaml releasenotes/notes/newton-bugs-06ed3727b973c271.yaml 
releasenotes/notes/no_plugins_for_copy-image-26c0e384a368bf6a.yaml releasenotes/notes/os-glance-injection-disallowed-5dad244dfb071938.yaml releasenotes/notes/os-glance-namespace-reserved-1fcb8a5fddca4e0f.yaml releasenotes/notes/oslo-log-use-stderr-changes-07f5daf3e6abdcd6.yaml releasenotes/notes/pending-delete-rollback-444ff94c0056bbdb.yaml releasenotes/notes/pike-metadefs-changes-95b54e0bf8bbefd6.yaml releasenotes/notes/pike-rc-1-a5d3f6e8877b52c6.yaml releasenotes/notes/pike-rc-2-acc173005045e16a.yaml releasenotes/notes/policy-in-code-7e0c6c070d32d136.yaml releasenotes/notes/policy-in-code-implications-438449a73af2893c.yaml releasenotes/notes/policy-refactor-xena-0cddb7f2d492cb3a.yaml releasenotes/notes/queens-metadefs-changes-daf02bef18d049f4.yaml releasenotes/notes/queens-uwsgi-issues-4cee9e4fdf62c646.yaml releasenotes/notes/range-header-request-83cf11eebf865fb1.yaml releasenotes/notes/rbac-updates-ba0fcb886fe4085c.yaml releasenotes/notes/remove-admin_role-f508754e98331fc4.yaml releasenotes/notes/remove-allow_additional_image_properties-ae33902e7967661f.yaml releasenotes/notes/remove-db-downgrade-0d1cc45b97605775.yaml releasenotes/notes/remove-enforce-secure-rbac-ec9a0249870460c2.yaml releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml releasenotes/notes/remove-owner_is_tenant-b30150def293effc.yaml releasenotes/notes/remove-s3-driver-639c60b71761eb6f.yaml releasenotes/notes/remove_enable_image_import_option-ec4a859ac9a7ea7b.yaml releasenotes/notes/remove_native_ssl-c16d5a127b57583d.yaml releasenotes/notes/remove_secure_proxy_ssl_header-2a95ad48ffa471ad.yaml releasenotes/notes/removed-location-strategy-functionality-b1b562e68608a6f8.yaml releasenotes/notes/reordered-store-config-opts-newton-3a6575b5908c0e0f.yaml releasenotes/notes/replicator-token-cleanup-4a573c86f1acccc0.yaml releasenotes/notes/restrict_location_updates-05454bb765a8c92c.yaml releasenotes/notes/rethinking-filesystem-access-120bc46064b3d40a.yaml releasenotes/notes/rocky-metadefs-changes-cb00c006ff51b541.yaml releasenotes/notes/rocky-rc-b0ea7628b7a74c96.yaml releasenotes/notes/scrubber-exit-e5d77f6f1a38ffb7.yaml releasenotes/notes/scrubber-refactor-73ddbd61ebbf1e86.yaml releasenotes/notes/secure-rbac-project-personas-fb0d9792b9dc3783.yaml releasenotes/notes/soft_delete-tasks-43ea983695faa565.yaml releasenotes/notes/store-weight-3ed3ee612579bc25.yaml releasenotes/notes/support-cinder-multiple-stores-eb4e6d912d549ee9.yaml releasenotes/notes/train-metadefs-changes-c4380754cdd13a19.yaml releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml releasenotes/notes/update-show_multiple_locations-helptext-7fa692642b6b6d52.yaml releasenotes/notes/use-cursive-c6b15d94845232da.yaml releasenotes/notes/use-webob-1.8.1-5c3cd1b1382f063e.yaml releasenotes/notes/ussuri-final-b377a21508ada060.yaml releasenotes/notes/victoria-m2-release-notes-8a6ae2fdb3d29dae.yaml releasenotes/notes/victoria-m3-releasenotes-9209cea98a29abc4.yaml releasenotes/notes/victoria-rc1-release-notes-d928355cf90d608d.yaml releasenotes/notes/virtuozzo-hypervisor-fada477b64ae829d.yaml releasenotes/notes/wallaby-m3-releasenotes-bdc9fe6938aba8cc.yaml releasenotes/notes/windows-support-f4aae61681dba569.yaml releasenotes/notes/wsgi-containerization-369880238a5e793d.yaml releasenotes/notes/xena-m2-releasenotes-e68fd81ece1d514a.yaml releasenotes/notes/xena-m3-releasenotes-a92d55d29eecc8f6.yaml releasenotes/notes/xena-rc1-release-notes-12dbe0ac528ce483.yaml releasenotes/notes/yoga-rc1-release-notes-153932161f52a038.yaml 
releasenotes/notes/zed-milestone-1-592415040e67924e.yaml releasenotes/notes/zed-milestone-2-a782e75cdbd8fe13.yaml releasenotes/notes/zed-milestone-3-3e38697ae4677a81.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po tools/test-setup.sh tools/test_format_inspector.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022047 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/entry_points.txt0000664000175000017500000000374700000000000021312 0ustar00zuulzuul00000000000000[console_scripts] glance-api = glance.cmd.api:main glance-cache-cleaner = glance.cmd.cache_cleaner:main glance-cache-manage = glance.cmd.cache_manage:main glance-cache-prefetcher = glance.cmd.cache_prefetcher:main glance-cache-pruner = glance.cmd.cache_pruner:main glance-control = glance.cmd.control:main glance-manage = glance.cmd.manage:main glance-replicator = glance.cmd.replicator:main glance-scrubber = glance.cmd.scrubber:main glance-status = glance.cmd.status:main [glance.database.metadata_backend] sqlalchemy = glance.db.sqlalchemy.metadata [glance.flows] api_image_import = glance.async_.flows.api_image_import:get_flow import = glance.async_.flows.base_import:get_flow location_import = glance.async_.flows.location_import:get_flow [glance.flows.import] convert = glance.async_.flows.convert:get_flow introspect = glance.async_.flows.introspect:get_flow ovf_process = glance.async_.flows.ovf_process:get_flow [glance.image_import.internal_plugins] copy_image = glance.async_.flows._internal_plugins.copy_image:get_flow glance_download = glance.async_.flows._internal_plugins.glance_download:get_flow web_download = glance.async_.flows._internal_plugins.web_download:get_flow [glance.image_import.plugins] image_conversion = glance.async_.flows.plugins.image_conversion:get_flow image_decompression = glance.async_.flows.plugins.image_decompression:get_flow inject_image_metadata = glance.async_.flows.plugins.inject_image_metadata:get_flow no_op = glance.async_.flows.plugins.no_op:get_flow [oslo.config.opts] glance = glance.opts:list_image_import_opts glance.api = glance.opts:list_api_opts glance.cache = glance.opts:list_cache_opts glance.manage = glance.opts:list_manage_opts glance.scrubber = glance.opts:list_scrubber_opts [oslo.config.opts.defaults] glance.api = glance.common.config:set_config_defaults [oslo.policy.enforcer] glance = glance.api.policy:get_enforcer [oslo.policy.policies] glance = glance.policies:list_rules 
[wsgi_scripts] glance-wsgi-api = glance.common.wsgi_app:init_app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/not-zip-safe0000664000175000017500000000000100000000000020227 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/pbr.json0000664000175000017500000000005700000000000017461 0ustar00zuulzuul00000000000000{"git_version": "3ea17a13", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/requires.txt0000664000175000017500000000140500000000000020401 0ustar00zuulzuul00000000000000Paste>=2.0.2 PasteDeploy>=1.5.0 PrettyTable>=0.7.1 Routes>=2.3.1 SQLAlchemy>=1.3.14 WSME>=0.8.0 WebOb>=1.8.1 alembic>=0.9.6 castellan>=0.17.0 cryptography>=2.6.1 cursive>=0.2.1 debtcollector>=1.19.0 defusedxml>=0.7.1 eventlet>=0.33.3 futurist>=1.2.0 glance-store>=2.3.0 httplib2>=0.9.1 iso8601>=0.1.11 jsonschema>=3.2.0 keystoneauth1>=3.4.0 keystonemiddleware>=5.1.0 os-win>=4.0.1 oslo.concurrency>=4.5.1 oslo.config>=8.1.0 oslo.context>=2.22.0 oslo.db>=5.0.0 oslo.i18n>=5.0.0 oslo.limit>=1.6.0 oslo.log>=4.5.0 oslo.messaging!=9.0.0,>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=4.4.0 oslo.reports>=1.18.0 oslo.upgradecheck>=1.3.0 oslo.utils>=4.7.0 osprofiler>=1.4.0 pbr>=3.1.1 python-keystoneclient>=3.8.0 retrying!=1.3.0,>=1.2.3 stevedore!=3.0.0,>=1.20.0 taskflow>=4.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867985.0 glance-29.0.0/glance.egg-info/top_level.txt0000664000175000017500000000000700000000000020530 0ustar00zuulzuul00000000000000glance ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9183102 glance-29.0.0/httpd/0000775000175000017500000000000000000000000014201 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/httpd/README0000664000175000017500000000013300000000000015056 0ustar00zuulzuul00000000000000Documentation for running Glance with Apache HTTPD is in doc/source/admin/apache-httpd.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/httpd/glance-api-uwsgi.ini0000664000175000017500000000060400000000000020036 0ustar00zuulzuul00000000000000[uwsgi] socket-timeout = 10 http-auto-chunked = true http-chunked-input = true http-raw-body = true chmod-socket = 666 lazy-apps = true add-header = Connection: close buffer-size = 65535 thunder-lock = true plugins = python enable-threads = true exit-on-reload = true die-on-term = true master = true processes = 4 http-socket = 127.0.0.1:60999 wsgi-file = /usr/local/bin/glance-wsgi-api ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/httpd/uwsgi-glance-api.conf0000664000175000017500000000013500000000000020203 0ustar00zuulzuul00000000000000KeepAlive Off SetEnv proxy-sendchunked 1 ProxyPass "/image" "http://127.0.0.1:60999" retry=0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9183102 glance-29.0.0/playbooks/0000775000175000017500000000000000000000000015061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/playbooks/enable-fips.yaml0000664000175000017500000000005000000000000020125 0ustar00zuulzuul00000000000000- hosts: all roles: - enable-fips ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/playbooks/post-check-metadata-injection.yaml0000664000175000017500000000170400000000000023545 0ustar00zuulzuul00000000000000# This playbook is for OpenDev infra consumption only. - hosts: controller tasks: - name: Run glance validation script shell: executable: /bin/bash cmd: | source /opt/stack/devstack/openrc set -xe cirrosimg=$(glance image-list | grep cirros | cut -d" " -f 2) # There could be more than one cirros image, so traverse through the list for image in $cirrosimg do echo "Dumping the cirros image for debugging..." glance image-show $image echo "Checking that the cirros image was decorated with metadata on import..." glance image-list --property-filter 'glance_devstack_test=doyouseeme?' | grep $image echo "Checking that the cirros image was converted to raw on import..." glance image-show $image | egrep -e 'disk_format.*raw' done environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9183102 glance-29.0.0/rally-jobs/0000775000175000017500000000000000000000000015134 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/rally-jobs/README.rst0000664000175000017500000000203000000000000016620 0ustar00zuulzuul00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks, generic cleanup resources, .... * extra - all files from this directory will be copied to the gates, so you are able to use absolute paths in rally tasks. Files will be located in ~/.rally/extra/* * glance.yaml is a task that is run in gates against OpenStack (nova network) deployed by DevStack Useful links ------------ * More about Rally: https://rally.readthedocs.io/en/latest/ * Rally release notes: https://rally.readthedocs.io/en/latest/project_info/release_notes/archive.html * How to add rally-gates: https://rally.readthedocs.io/en/latest/quick_start/gates.html * About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html * Plugin samples: https://github.com/openstack/rally/tree/stable/0.9/samples/plugins ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9183102 glance-29.0.0/rally-jobs/extra/0000775000175000017500000000000000000000000016257 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/rally-jobs/extra/README.rst0000664000175000017500000000025400000000000017747 0ustar00zuulzuul00000000000000Extra files =========== All files from this directory will be copied to the gates, so you are able to use absolute paths in rally tasks.
Files will be in ~/.rally/extra/* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/rally-jobs/extra/fake.img0000664000175000017500000000000000000000000017651 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/rally-jobs/glance.yaml0000664000175000017500000000172300000000000017254 0ustar00zuulzuul00000000000000--- version: 2 title: Task used by gate-rally-dsvm-glance-ubuntu-xenial-nv and gate-rally-dsvm-py35-glance-nv subtasks: - title: Test Glance upload and list image performance scenario: GlanceImages.create_and_list_image: image_location: "~/.rally/extra/fake.img" container_format: "bare" disk_format: "qcow2" runner: constant: times: 700 concurrency: 7 contexts: users: tenants: 1 users_per_tenant: 1 - title: Test Glance upload and delete image performance scenario: GlanceImages.create_and_delete_image: image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" container_format: "bare" disk_format: "qcow2" runner: constant: times: 20 concurrency: 5 contexts: users: tenants: 5 users_per_tenant: 2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9183102 glance-29.0.0/rally-jobs/plugins/0000775000175000017500000000000000000000000016615 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/rally-jobs/plugins/README.rst0000664000175000017500000000061200000000000020303 0ustar00zuulzuul00000000000000Rally plugins ============= All ``*.py`` modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need for any extra configuration, and there is no difference between writing them here and in the Rally code base. Note that it is better to push all interesting and useful benchmarks to the Rally code base; this simplifies administration for operators. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867985.782294 glance-29.0.0/releasenotes/0000775000175000017500000000000000000000000015547 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9503138 glance-29.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000021150 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml0000664000175000017500000000062700000000000031240 0ustar00zuulzuul00000000000000--- security: - Fixing bug 1525915; an image might be transitioned from 'active' to 'queued' by a regular user by removing the last location of the image (or replacing its locations with an empty list). This allowed the user to re-upload data to the image, breaking Glance's promise of image data immutability. From now on, the last location cannot be removed and locations cannot be replaced with an empty list.
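A minimal sketch of the behavior described above, using the v2 images JSON-patch API (endpoint, token, and image ID are placeholders); a patch that would leave the image with zero locations is now rejected instead of resetting the image to 'queued':

    curl -i -X PATCH "$OS_IMAGE_URL/v2/images/$IMAGE_ID" \
        -H "X-Auth-Token: $OS_TOKEN" \
        -H "Content-Type: application/openstack-images-v2.1-json-patch" \
        -d '[{"op": "replace", "path": "/locations", "value": []}]'
    # Expect a 4xx response; the image keeps its last location and stays 'active'.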
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/Stein-reno-rc1-0a03f8394934a2e7.yaml0000664000175000017500000000617500000000000024356 0ustar00zuulzuul00000000000000--- prelude: | The Stein release cycle has been without major changes to the Images API; this release does not introduce a new Images API minor version. Some work has been done on Windows compatibility, and the Glance Stein release is able to run on the Windows platform. Future development will also be gated on Windows, based on a 3rd-party CI model. Due to some unresolved issues in the work on consuming multiple backends, the stabilization of that feature has been pushed to Train and it will stay EXPERIMENTAL in Stein. This release also contains some feature work, quality-of-life improvements and bug fixes. Please refer to the rest of the release notes and docs for details. features: - | Re-introduced the cache-manage utility. In Rocky the Images API v1 dependent glance-cache-manage utility was removed along with the v1 endpoints. The Stein release introduces the command refactored to utilize the Images API version 2. - | Added an oslopolicy enforcer entrypoint, making it possible to utilize oslopolicy-policy-generator to get uniform information about the policies. NOTE: Glance requires policy.json to be present for any meaningful output. issues: - | Multiple back-ends: no default back-end gets assigned when adding a location via the locations API without defining a back-end ID on the call. This might especially affect Nova snapshots utilizing the rbd back-end. fixes: - | The following are some highlights of the bug fixes included in this release. * Bug 1781617_: Rename ``async`` package to ``async_`` (Python 3.7) * Bug 1781627_: Handle StopIteration for Py3.7 PEP 0479 * Bug 1695299_: Support RFC1738 quoted chars in passwords * Bug 1750892_: Update status to active when locations replaced * Bug 1770410_: Use WebOb 1.8.1 * Bug 1793057_: Provision to add new config options in sample config file * Bug 1800601_: py3: fix recursion issue under py37 * Bug 1805765_: Image conversion fails * Bug 1803643_: Fix for FK constraint violation * Bug 1808063_: Guard __getattr__ on QuotaImageTagsProxy * Bug 1809462_: Correct typo in config option choices (Image conversion) * Bug 1803299_: Failure in web-download kept image in importing state * Bug 1818919_: py3: Fix return type on CooperativeReader.read * Bug 1803498_: Data remains in staging area if 'file' store is not enabled .. _1781617: https://code.launchpad.net/bugs/1781617 .. _1781627: https://code.launchpad.net/bugs/1781627 .. _1695299: https://code.launchpad.net/bugs/1695299 .. _1750892: https://code.launchpad.net/bugs/1750892 .. _1770410: https://code.launchpad.net/bugs/1770410 .. _1793057: https://code.launchpad.net/bugs/1793057 .. _1800601: https://code.launchpad.net/bugs/1800601 .. _1805765: https://code.launchpad.net/bugs/1805765 .. _1803643: https://code.launchpad.net/bugs/1803643 .. _1808063: https://code.launchpad.net/bugs/1808063 .. _1809462: https://code.launchpad.net/bugs/1809462 .. _1803299: https://code.launchpad.net/bugs/1803299 .. _1818919: https://code.launchpad.net/bugs/1818919 ..
_1803498: https://code.launchpad.net/bugs/1803498 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/Train-milestone3-be5520106a182fa0.yaml0000664000175000017500000000136400000000000025036 0ustar00zuulzuul00000000000000--- features: - | Train release includes a change to how cache prefetching works. As the prefetcher was one of the last components still relying to the glance-registry the requirement was removed by implementing the prefetcher as part of glance-api. Crontab based prefetcher is not available anymore and the new prefetching will be set up through glance-api.conf. upgrade: - | Location metadata key ``backend`` has been changed to ``store``. Any environment that might be using the old ``backend`` key will have the key name changed through lazy update upon access. fixes: - | Bug 1836140_: Image deletion returns 500 if 'file' store is not enabled .. _1836140: https://bugs.launchpad.net/glance/+bug/1836140 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-all-visibility-image-filter-ea2f3948ff778fe3.yaml0000664000175000017500000000016500000000000030136 0ustar00zuulzuul00000000000000--- features: - | You can now list all images that are available to you. Use the 'all' visibility option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-cli-and-cache-opts-902f28d65c8fb827.yaml0000664000175000017500000000026400000000000026021 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1972666_: Added cli_opts and cache_opts to support configgen to pick all groups from wsgi.py .. _1972666: https://code.launchpad.net/bugs/1972666 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-compressed-format-185e537187a202bd.yaml0000664000175000017500000000653600000000000026027 0ustar00zuulzuul00000000000000--- features: - | The identifier ``compressed`` has been added to the list of supported container formats. The intent is that this format identifier will be used for any compressed file archive format (for example, gzip or rar) that is not otherwise covered by the existing container format identifiers. As with all container formats, Glance does not verify that the data payload of an image is actually in that format. Further, you should not expect other OpenStack services to be able to handle arbitrary compressed file formats. Consult the documentation of any services that will consume your image for details. upgrade: - | The identifier ``compressed`` has been added to the list of supported container formats. The intent is that this format identifier will be used for any compressed file archive format (for example, gzip or rar) that is not otherwise covered by the existing container format identifiers. The ``compressed`` container format was added in support of the Cinder (Block Storage Service) feature `Leverage compression accelerator `_. You may expect that Cinder will be able to consume any image in ``compressed`` container format *that Cinder has created*. You should not expect, however, for other services to be able to consume such an image at the present time. Further, you should not expect Cinder to be able to successfully use an image in ``compressed`` format that it has not created itself. 
issues: - | The introduction of the ``compressed`` container format in this release gives us the opportunity to remind you that Glance does not verify that the ``container_format`` image property is accurate for *any* container format. It is the responsibility of the image consumer to verify the image data payload format and take appropriate action in the case of a misdescribed image. - | The intent of the ``compressed`` container format identifier introduced in this release is that it will be used for any compressed file archive format (for example, gzip or rar) that is not otherwise covered by the existing container format identifiers. The exact format of the compressed file is unspecified. It is the responsibility of the consuming service to analyze the data payload and determine the compression format. A particular OpenStack service may only support specific formats. Thus, even if a service does support the ``compressed`` container format, this does not imply that the service can handle arbitrary compression formats. Consult the documentation for the service that will consume your image for details. - | As of this release, the only service using the ``compressed`` container format is Cinder (Block Storage Service), when Cinder is configured to use compression when uploading a volume-image to Glance. While you may expect that Cinder will be able to consume any image in ``compressed`` container format *that Cinder has created*, you should not expect Cinder to be able to successfully use an image in ``compressed`` format that it has not created itself. Consult the `Cinder documentation `_ for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-cpu-thread-pinning-metadata-09b1866b875c4647.yaml0000664000175000017500000000023100000000000027572 0ustar00zuulzuul00000000000000--- upgrade: - Added additional metadata for CPU thread pinning policies to 'compute-cpu-pinning.json'. Use the ``glance_manage`` tool to upgrade. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-description-common-image-property-95ab1139d41579d2.yaml0000664000175000017500000000076700000000000031156 0ustar00zuulzuul00000000000000--- features: - | A new common image property, 'description', has been added. This allows you to specify a brief human-readable description, suitable for display in a user interface, on images. It has been possible to do this previously using a custom image property; this change simply standardizes the usage in order to promote interoperability. This change has no effect on any property named 'description' on existing images, and it is not a required image property. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-glance-download-method-be6d9e927b8b0a43.yaml0000664000175000017500000000075100000000000027132 0ustar00zuulzuul00000000000000--- features: - | Glance to glance image import plugin. With this release users can import an image from an other glance server from an other opensatck region. The two glance services must use the same keystone service. 
The feature uses the same keystone authentication token on both glance services and by default copies container_format, disk_format and the customizable properties ``['hw_', 'trait:', 'os_distro', 'os_secure_boot', 'os_type']`` from the source image ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-new-add-location-api-acd459299976b4a5.yaml0000664000175000017500000000175600000000000026371 0ustar00zuulzuul00000000000000--- features: - | This release brings the additional functionality of adding a new location to an image in the ``queued`` state, which will replace the image-update mechanism for consumers like cinder and nova, to address OSSN-0090 and OSSN-0065. issues: - | In the case of the ``http`` store, if a bad value is passed for ``os_hash_value`` in the validation data, the task fails as expected, but it wrongly stores the location of the image, which then needs to be popped out. The location does not get deleted, because deletion of locations is not allowed for the ``http`` store. Here the image needs to be deleted, as it is of no use. - | During validation of hashing data when do_secure_hash is `false`, we can only validate the expected length for hash_algo and not the actual expected hash value. If a garbage hash_value of the expected size has been provided, the image becomes active after adding the location, but it will be of no use, as download or boot will fail with a corrupt image error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-new-get-locations-api-83c4b6dc077efc5f.yaml0000664000175000017500000000033500000000000027005 0ustar00zuulzuul00000000000000--- features: - | This release brings the additional functionality of getting the locations associated with an image, accessible only to service users, i.e. consumers like cinder and nova, for OSSN-0090 and OSSN-0065. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-ploop-format-fdd583849504ab15.yaml0000664000175000017500000000061200000000000025071 0ustar00zuulzuul00000000000000--- prelude: > - Add ``ploop`` to the list of supported disk formats. features: - The identifier ``ploop`` has been added to the list of supported disk formats in Glance. The respective configuration option has been updated and the default list shows ``ploop`` as a supported format. upgrade: - The ``disk_format`` config option lists ``ploop`` as supported by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-processlimits-to-qemu-img-c215f5d90f741d8a.yaml0000664000175000017500000000077700000000000027576 0ustar00zuulzuul00000000000000--- security: - All ``qemu-img info`` calls are now run under resource limitations that limit the CPU time and address space usage of the process running the command to 2 seconds and 1 GB respectively. This addresses the bug https://bugs.launchpad.net/glance/+bug/1449062. Current usage of "qemu-img" is limited to Glance tasks, which by default (since the Mitaka release) are only available to admin users.
We continue to recommend that tasks only be exposed to trusted users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add-vhdx-format-2be99354ad320cca.yaml0000664000175000017500000000060100000000000025032 0ustar00zuulzuul00000000000000--- prelude: > - Add ``vhdx`` to the list of supported disk formats. features: - The identifier ``vhdx`` has been added to the list of supported disk formats in Glance. The respective configuration option has been updated and the default list shows ``vhdx`` as a supported format. upgrade: - The ``disk_format`` config option lists ``vhdx`` as supported by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add_capability_to_purge_all_deleted_rows-7b3b9b767669b1a5.yaml0000664000175000017500000000013200000000000032154 0ustar00zuulzuul00000000000000--- features: - | "glance-manage" purges all deleted rows if "--max_rows" equals -1. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=glance-29.0.0/releasenotes/notes/add_policy_enforcement_for_metadef_delete_apis-95d2b16cc444840a.yaml 22 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/add_policy_enforcement_for_metadef_delete_apis-95d2b16cc444840a.yam0000664000175000017500000000102200000000000033120 0ustar00zuulzuul00000000000000--- features: - | Policy enforcement for several Metadata Definition delete APIs is added in this release. The following actions are enforced and added to the policy.json: - ``delete_metadef_namespace`` - ``delete_metadef_object`` - ``remove_metadef_resource_type_association`` - ``remove_metadef_property`` - ``delete_metadef_tag`` - ``delete_metadef_tags`` This prevents roles that should not have access to these APIs from performing the actions listed above. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/added-quota-usage-api-f1914054132f2021.yaml0000664000175000017500000000067400000000000025527 0ustar00zuulzuul00000000000000--- features: - | This release brings additional functionality to the unified quota work done in the previous release. A usage API is now available, which provides a way for users to see their current quota limits and their active resource usage towards them. For more information, see the discovery section in the `api-ref `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/added-store-detail-api-215810aa85dfbb99.yaml0000664000175000017500000000025100000000000026200 0ustar00zuulzuul00000000000000--- features: - | This release brings additional functionality to the stores API. The stores detail API provides store-specific information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/alembic-migrations-902b31edae7a5d7d.yaml0000664000175000017500000000363300000000000025707 0ustar00zuulzuul00000000000000--- prelude: > - **Experimental** zero-downtime database upgrade using an expand-migrate-contract series of operations is available. upgrade: - | The database migration engine used by Glance for database upgrades has been changed from *SQLAlchemy Migrate* to *Alembic* in this release.
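# Editor's sketch of the Alembic-backed commands described below (standard
# glance-manage syntax; the database must be configured in glance-api.conf):
#   glance-manage db version         # now prints a revision ID such as 'mitaka02'
#   glance-manage db upgrade|sync    # move the schema to the latest revision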
* This has necessitated a change in the location and naming convention for migration scripts. Developers, operators, and DevOps are strongly encouraged to read through the `Database Management`_ section of the Glance documentation for details of the changes introduced in the Ocata release. Here's a brief summary of the changes: - All the ``glance-manage db`` commands are changed appropriately to use Alembic to perform operations such as ``version``, ``upgrade``, ``sync`` and ``version_control``. Hence, the "old-style" migration scripts will no longer work with the Ocata ``glance-manage db`` commands. - Database versions are no longer numerical. Instead, they are the *revision ID* of the last migration applied on the database. * For example, the Liberty migration, which was version ``42`` under the old system, will now appear as ``liberty``. The Mitaka migrations ``43`` and ``44`` appear as ``mitaka01`` and ``mitaka02``, respectively. * The change in migration engine has been undertaken in order to enable zero-downtime database upgrades, which are part of the effort to implement rolling upgrades for Glance (scheduled for the Pike release). - A preview of zero-downtime database upgrades is available in this release, but it is **experimental** and **not supported for production systems**. Please consult the `Database Management`_ section of the Glance documentation for details. .. _`Database Management`: http://docs.openstack.org/developer/glance/db.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/antelope-milestone-2-d89e39412f9c0334.yaml0000664000175000017500000000117000000000000025613 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1990854_: oslo_limit section not clear - | Bug 1779781_: virt/vmware not support VirtualSriovEthernetCard - | Bug 1647491_: Missing documentation for glance-manage db_purge command - | Bug 1983279_: Cannot upload vmdk images due to unsupported vmdk format - | Bug 1989268_: Wrong assertion method .. _1990854: https://code.launchpad.net/bugs/1990854 .. _1779781: https://code.launchpad.net/bugs/1779781 .. _1647491: https://code.launchpad.net/bugs/1647491 .. _1983279: https://code.launchpad.net/bugs/1983279 .. _1989268: https://code.launchpad.net/bugs/1989268 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/antelope-milestone-3-b9a4f7fdba31f628.yaml0000664000175000017500000000167200000000000026116 0ustar00zuulzuul00000000000000--- prelude: > In this cycle, Glance enabled the new API policy (RBAC) defaults and scope checking by default, and removed the deprecated ``enforce_secure_rbac`` option, which is no longer needed after switching to the new defaults. The default values of the config options ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults`` have been changed to ``True``. The old policies are still present but disabled by default. fixes: - | Bug 1996188_: [OSSA-2023-002] Arbitrary file access through custom VMDK flat descriptor (CVE-2022-47951) - | Bug 1939690_: The api-ref response and the actual response returned from the Create Tags API does not match - | Bug 1983279_: Cannot upload vmdk images due to unsupported vmdk format .. _1996188: https://code.launchpad.net/bugs/1996188 .. _1939690: https://code.launchpad.net/bugs/1939690 ..
_1983279: https://code.launchpad.net/bugs/1983279 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/api-2-6-current-9eeb83b7ecc0a562.yaml0000664000175000017500000000553100000000000024701 0ustar00zuulzuul00000000000000--- prelude: > - The CURRENT version of the Images API v2 is bumped to **2.6**. The 2.6 API was available in the previous (Pike) release as an experimental API to introduce the calls necessary for the `interoperable image import functionality`_. - A new interoperable image import method, ``web-download`` is introduced. features: - | A new interoperable image import method, ``web-download`` is introduced. This method allows an end user to import an image from a remote URL. The image data is retrieved from the URL and stored in the Glance backend. (In other words, this is a **copy-from** operation.) This feature is enabled by default, but it is optional. Whether it is offered at your installation depends on the value of the ``enabled_import_methods`` configuration option in the ``glance-api.conf`` file (assuming, of course, that you have not disabled image import at your site). upgrade: - | The **CURRENT** version of the Images API supplied by Glance is introduced as **2.6**. It includes the new API calls introduced on an experimental basis in the Pike release. While the 2.6 API is CURRENT, whether the interoperable image import functionality it makes available is exposed to end users is controlled by a configuration option, ``enable_image_import``. Although this option existed in the previous release, its effect is slightly different in Queens. * ``enable_image_import`` is **True** by default (in Pike it was False) * When ``enable_image_import`` is **True**, a new import-method, ``web-download`` is available. (In Pike, only ``glance-direct`` was offered.) Which import-methods you offer can be configured using the ``enabled_import_methods`` option in the ``glance-api.conf`` file. * If ``enable_image_import`` is set **False**, requests to the v2 endpoint for URIs defined only in v2.6 will return 404 (Not Found) with a message in the response body stating "Image import is not supported at this site." Additionally, the image-create response will not contain the "OpenStack-image-import-methods" header. The ``enable_image_import`` configuration option was introduced as DEPRECATED in Pike and will be removed in Rocky. The discovery calls defined in the `refactored image import spec`_ remain in an abbreviated form in this release. Finally, there are no changes to the version 2.5 API in this release. All version 2.5 calls will work whether the new import functionality is enabled or not. .. _`interoperable image import functionality`: https://developer.openstack.org/api-ref/image/v2/#interoperable-image-import .. _`refactored image import spec`: https://specs.openstack.org/openstack/glance-specs/specs/mitaka/approved/image-import/image-import-refactor.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/api-2.16-8417b1e23322fedb.yaml0000664000175000017500000000121300000000000023123 0ustar00zuulzuul00000000000000--- features: - | When the Glance image cache is being used, the CURRENT version of the Image service API, as indicated in the ``GET /versions`` response, is 2.16. upgrade: - | The Image service API call ``PUT /v2/cache/{image_id}`` now returns a 202 (Accepted) response code to indicate success. 
glance-29.0.0/releasenotes/notes/api-minor-ver-bump-2-6-aa3591fc58f08055.yaml
---
prelude: >
    - The *minor* version of the Images API v2 is bumped to **2.6** to
      introduce an EXPERIMENTAL version of the API that includes the new
      calls introduced for the Minimal Viable Product delivery of the
      `refactored image import`_ functionality. Version **2.5** remains
      the CURRENT version of the Images API.
upgrade:
  - |
    An **EXPERIMENTAL** version of the Images API supplied by Glance is
    introduced as **2.6**. It includes the new API calls introduced for
    the `refactored image import`_ functionality. This functionality is
    **not** enabled by default, so the CURRENT version of the Images API
    remains at 2.5. There are no changes to the version 2.5 API in this
    release, so all version 2.5 calls will work whether or not the new
    import functionality is enabled.

    The version 2.6 API is being introduced as EXPERIMENTAL because it
    is a Minimal Viable Product delivery of the functionality described
    in the `refactored image import`_ specification. As an MVP, the
    responses described in that specification are abbreviated in version
    2.6. It is expected that version 2.6 will be completed in Queens,
    but in the meantime we encourage operators to try out the new
    functionality while keeping in mind its EXPERIMENTAL nature.

    .. _`refactored image import`: https://specs.openstack.org/openstack/glance-specs/specs/mitaka/approved/image-import/image-import-refactor.html

glance-29.0.0/releasenotes/notes/api-minor-version-bump-bbd69dc457fc731c.yaml
---
prelude: >
    - The *minor* version of the Images API v2 is bumped to **2.5**.
upgrade:
  - |
    The **CURRENT** version of the version 2 Images API supplied by
    Glance is now **2.5**. Changes include:

    * The 'visibility' enumeration has been increased from two values
      (``public``, ``private``) to four values (``public``, ``private``,
      ``shared``, and ``community``).

    * Formerly, it was possible to add members to an image whose
      visibility was ``private``, thereby creating a "shared" image. In
      this release, an image must have a visibility of ``shared`` in
      order to accept member operations. Attempting to add a member to
      an image with a visibility of ``private`` will result in a
      `4xx response`_ containing an informative message.

    .. _`4xx response`: https://developer.openstack.org/api-ref/image/v2/?expanded=create-image-member-detail#create-image-member
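To make the new member workflow above concrete, here is a minimal
sketch of switching an image to ``shared`` visibility and then adding a
member, which would otherwise fail with a 4xx on a ``private`` image;
the endpoint, token, and IDs are placeholder assumptions::

    import json
    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # Step 1: an image must be 'shared' before it will accept members.
    patch = [{"op": "replace", "path": "/visibility", "value": "shared"}]
    requests.patch(
        f"{glance}/v2/images/<image-id>",
        headers={**headers,
                 "Content-Type": "application/openstack-images-v2.1-json-patch"},
        data=json.dumps(patch),
    )

    # Step 2: the member create call now succeeds (200).
    resp = requests.post(f"{glance}/v2/images/<image-id>/members",
                         headers=headers, json={"member": "<project-id>"})
    assert resp.status_code == 200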
glance-29.0.0/releasenotes/notes/bobcat-milestone-1-releasenotes-2d109105530877d6.yaml
---
fixes:
  - |
    Bug 2007354_: duplicate values in compute-host-capabilities.json

    .. _2007354: https://code.launchpad.net/bugs/2007354

glance-29.0.0/releasenotes/notes/bobcat-milestone-2-releasenotes-085084b03f66d671.yaml
---
fixes:
  - |
    Bug 1937901_: healthcheck middleware should be deployed as app
    instead of filter
  - |
    Bug 1889664_: Image Import 'web-download' is broken with py37+

    .. _1937901: https://code.launchpad.net/bugs/1937901
    .. _1889664: https://code.launchpad.net/bugs/1889664

glance-29.0.0/releasenotes/notes/bp-barbican-secret-deletion-support-40cffa5ffa33447e.yaml
---
features:
  - |
    To support the Block Storage service (Cinder) upload-volume-to-image
    action when the volume is of an encrypted volume type, when such an
    image is deleted, Glance will now contact the OpenStack Key
    Management service (Barbican) and request it to delete the
    associated encryption key. Two extra properties must be set on the
    image for this to work: ``cinder_encryption_key_id`` (whose value is
    the identifier in the OpenStack Key Management service for the
    encryption key used to encrypt the volume) and
    ``cinder_encryption_key_deletion_policy`` (whose value may be either
    ``on_image_deletion`` or ``do_not_delete``). Please note the
    following:

    * An image created by the Block Storage service will have these
      properties set automatically, with the deletion policy set to
      ``on_image_deletion``.

    * The Block Storage service *always* creates a new secret in
      Barbican when it uploads a volume as an image, keeping a 1-1
      relation between each secret stored in the Key Management Service
      and each image of an encrypted volume stored in Glance. Thus,
      deleting the Barbican secret *at the time when the image is
      deleted* will not cause data loss *as long as the secret is not
      being used for any other purpose*.

    * The Block Storage service will not use the secret associated with
      an image for any other purpose.

    * If you choose to use the Barbican secret identified by the value
      of ``cinder_encryption_key_id`` for any other purpose, you risk
      data loss.

    * Manual use of the ``cinder_encryption_key_*`` properties is *not*
      recommended.

    * If the ``cinder_encryption_key_deletion_policy`` image property is
      missing or has any value other than ``on_image_deletion``, Glance
      will **not** attempt to delete the key whose identifier is the
      value of ``cinder_encryption_key_id``.
upgrade:
  - |
    The properties ``cinder_encryption_key_id`` and
    ``cinder_encryption_key_deletion_policy`` have been added to the
    *common image properties* and appear in the image schema. See the
    "New Features" section of these notes for information about these
    image properties.
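The deletion-policy rules in the note above amount to a simple guard.
Here is an illustrative sketch, not the actual Glance source, of the
documented decision; the function name and sample property values are
assumptions::

    def should_delete_encryption_key(image_properties):
        """Apply the documented policy: delete the Barbican secret only
        when an explicit 'on_image_deletion' policy accompanies a key id."""
        key_id = image_properties.get("cinder_encryption_key_id")
        policy = image_properties.get(
            "cinder_encryption_key_deletion_policy")
        return key_id is not None and policy == "on_image_deletion"

    # A Cinder-created image carries both properties automatically.
    props = {"cinder_encryption_key_id": "<barbican-secret-id>",
             "cinder_encryption_key_deletion_policy": "on_image_deletion"}
    assert should_delete_encryption_key(props)
    # Missing or non-matching policy: the key is never deleted.
    assert not should_delete_encryption_key({})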
glance-29.0.0/releasenotes/notes/bp-inject-image-metadata-0a08af539bcce7f2.yaml
---
features:
  - |
    Added a plugin to inject image metadata properties to non-admin
    images created via the interoperable image import process.
upgrade:
  - |
    Added a plugin to inject image metadata properties to non-admin
    images created via the interoperable image import process. This
    plugin implements the spec `Inject metadata properties automatically
    to non-admin images`_. See the spec for a discussion of the use case
    addressed by this plugin.

    Use of the plugin requires configuration as described in the `The
    Image Property Injection Plugin`_ section of the Glance Admin Guide.

    Note that the plugin applies *only* to images imported via the
    `interoperable image import process`_. Thus images whose data is set
    using the `image data upload`_ call will *not* be processed by the
    plugin and hence will not have properties injected. You can force
    end users to use the interoperable image import process by
    restricting the data upload call, which is governed by the
    ``upload_image`` policy in the Glance ``policy.json`` file. See the
    documentation for more information.

    .. _`Inject metadata properties automatically to non-admin images`: https://specs.openstack.org/openstack/glance-specs/specs/queens/approved/glance/inject-automatic-metadata.html
    .. _`interoperable image import process`: https://developer.openstack.org/api-ref/image/v2/#interoperable-image-import
    .. _`The Image Property Injection Plugin`: https://docs.openstack.org/glance/latest/admin/interoperable-image-import.html#the-image-property-injection-plugin
    .. _`image data upload`: https://developer.openstack.org/api-ref/image/v2/#upload-binary-image-data

glance-29.0.0/releasenotes/notes/bp-mitigate-ossn-0075-c0e74e60d86d8ea2.yaml
---
security:
  - |
    The ``glance-manage`` tool has been updated to address `OSSN-0075`_.
    Please see the `Database Maintenance`_ section of the Glance
    Administration Guide for details.

    .. _`OSSN-0075`: https://wiki.openstack.org/wiki/OSSN/OSSN-0075
    .. _`Database Maintenance`: https://docs.openstack.org/glance/latest/admin/db.html#database-maintenance

glance-29.0.0/releasenotes/notes/bp-upgrade-checks-b3272c3ddb4e8cf7.yaml
---
features:
  - |
    [`Community Goal `_] Support has been added for developers to write
    pre-upgrade checks. Operators can run these checks using
    ``glance-status upgrade check``. This allows operators to be more
    confident when upgrading their deployments by having a tool that
    automates programmable checks against the deployment configuration
    or dataset.

glance-29.0.0/releasenotes/notes/bp-virtio-packed-ring-configuration-support-0cd0333c1c52c02b.yaml
---
upgrade:
  - |
    The following metadata definitions have been modified:

    * Added ``hw:virtio_packed_ring`` boolean in the
      ``OS::Nova::Flavor`` namespace, and ``hw_virtio_packed_ring``
      boolean in the ``OS::Glance::Image`` namespace.

    You may upgrade these definitions using:
    ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]``
glance-29.0.0/releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml
---
upgrade:
  - Metadata definitions previously associated with OS::Nova::Instance
    have been changed to be associated with OS::Nova::Server in order to
    align with Heat and Searchlight. You may either upgrade them using
    glance-manage db load_metadefs [path] [merge] [prefer_new] or
    glance-manage db upgrade 44.
fixes:
  - Metadata definitions previously associated with OS::Nova::Instance
    have been changed to be associated with OS::Nova::Server in order to
    align with Heat and Searchlight.

glance-29.0.0/releasenotes/notes/bug-1593177-8ef35458d29ec93c.yaml
---
upgrade:
  - The ``default`` policy in ``policy.json`` now uses the admin role
    rather than any role. This is to make the policy file restrictive
    rather than permissive and tighten security.

glance-29.0.0/releasenotes/notes/bug-1719252-name-validation-443a2e2a36be2cec.yaml
---
other:
  - |
    The metadefs schemas for 'property', 'properties', 'tag', 'tags',
    'object', and 'objects' previously specified a 'name' element of
    maximum 255 characters. Any attempt to add a name of greater than 80
    characters in length, however, resulted in a 500 response. The
    schemas have been corrected to specify a maximum length of 80
    characters for the 'name' field.

glance-29.0.0/releasenotes/notes/bug-1861334-ebc2026b85675d47.yaml
---
fixes:
  - |
    Bug 1861334_: cors config defaults not used when Glance is run as
    WSGI app

    .. _1861334: https://bugs.launchpad.net/glance/+bug/1861334

glance-29.0.0/releasenotes/notes/bug-1881958-d0e16538f3c0ffaa.yaml
---
fixes:
  - |
    Bug 1881958_: read-only http store should not be used if
    --all-stores specified for import/copy image workflow

    .. _1881958: https://bugs.launchpad.net/glance/+bug/1881958

glance-29.0.0/releasenotes/notes/bug-1979699-70182ec2aead0383.yaml
---
fixes:
  - |
    `Bug #1979699 <https://bugs.launchpad.net/glance/+bug/1979699>`_:
    Fix the ``glance-cache-prefetcher`` command to set up access to
    backend stores when the multi store feature is used.
glance-29.0.0/releasenotes/notes/bug-1980049-623d2eb0fa074136.yaml
---
upgrade:
  - |
    The Image service API calls ``DELETE /v2/cache/{image_id}`` and
    ``DELETE /v2/cache`` now return a 204 (No Content) response code to
    indicate success. In glance 24.0.0 (the initial Yoga release), they
    had mistakenly returned a 200.
fixes:
  - |
    Bug `1980049 <https://bugs.launchpad.net/glance/+bug/1980049>`_:
    Fixed the success response code of the REST API calls
    ``DELETE /v2/cache/{image_id}`` and ``DELETE /v2/cache`` to be 204
    (No Content), following the original design of the feature.

glance-29.0.0/releasenotes/notes/bug-2059809-disallow-qcow2-datafile-5d5ff4dbd590c911.yaml
---
security:
  - |
    Images in the qcow2 format with an external data file are now
    rejected by glance because such images could be used in an exploit
    to expose host information. See `Bug #2059809
    <https://bugs.launchpad.net/glance/+bug/2059809>`_ for details.
fixes:
  - |
    `Bug #2059809 <https://bugs.launchpad.net/glance/+bug/2059809>`_:
    Fixed issue where a qcow2 format image with an external data file
    could expose host information. Such an image format with an external
    data file will now be rejected by glance. To achieve this,
    format_inspector has been extended by adding safety checks for qcow2
    and vmdk files in glance. Unsafe qcow and vmdk files will be
    rejected by pre-examining them with a format inspector to ensure
    safe configurations prior to any qemu-img operations.
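For illustration, the external-data-file condition described in the
note above is visible directly in the qcow2 header. Here is a minimal
sketch, not Glance's actual format_inspector code, of such a check; the
offsets follow the published qcow2 v3 header layout::

    import struct

    def has_external_data_file(path):
        """Return True if a qcow2 v3 image sets the external-data-file
        incompatible-feature bit (bit 2 of the u64 at byte offset 72)."""
        with open(path, "rb") as f:
            header = f.read(80)
        magic, version = struct.unpack(">4sI", header[:8])
        if magic != b"QFI\xfb" or version < 3:
            return False  # not qcow2, or a v2 header without feature bits
        incompatible_features = struct.unpack(">Q", header[72:80])[0]
        return bool(incompatible_features & (1 << 2))

    # An image for which this returns True is now rejected by glance.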
glance-29.0.0/releasenotes/notes/bump-api-2-4-efa266aef0928e04.yaml
---
prelude: >
    - Glance API ``minor`` version bumped to 2.4.
upgrade:
  - |
    The Glance API **CURRENT** ``minor`` version is now ``2.4``.

    * To partially fix an important image locations bug 1587985, an
      API-impacting change has been merged into Glance.

    * This will result in a non-backward-compatible experience before
      and after the **Newton** release for users using the ``add``
      feature of image locations.

glance-29.0.0/releasenotes/notes/cache-api-b806ccfb8c5d9bb6.yaml
---
features:
  - |
    This release introduces new APIs for cache-related operations. This
    new version of the cache API will help administrators to cache
    images on dedicated glance nodes as well. For more information, see
    the ``Cache Manage`` section in the `api-ref-guide `_.

glance-29.0.0/releasenotes/notes/caracal-milestone-3-releasenotes-534b1daa3e1f254c.yaml
---
fixes:
  - |
    Bug 2049064_: Unit/functional test failures with oslo.limit 2.3.0
  - |
    Bug 2028895_: Interoperable Image Import in glance documented format
    for inject not working as expected

    .. _2049064: https://code.launchpad.net/bugs/2049064
    .. _2028895: https://code.launchpad.net/bugs/2028895

glance-29.0.0/releasenotes/notes/cinder-store-migration-non-owner-80a2a8114d8602aa.yaml
---
fixes:
  - |
    The cinder store lazy migration code assumed that the user
    performing the GET was authorized to modify the image in order to
    perform the update. This will not be the case for shared or public
    images where the user is not the owner or an admin, and would result
    in a 404 to the user if a migration is needed but not completed.
    Now, we delay the migration if we are not sufficiently authorized,
    allowing the first GET by the owner (or an admin) to perform it. See
    Bug 1932337_ for more information.

    .. _1932337: https://bugs.launchpad.net/glance/+bug/1932337

glance-29.0.0/releasenotes/notes/clean-up-acceptable-values-store_type_preference-39081e4045894731.yaml
---
upgrade:
  - |
    Deprecated values are no longer recognized for the configuration
    option ``store_type_preference``. The two non-standard values
    'filesystem' and 'vmware_datastore' were DEPRECATED in Newton and
    are no longer operable. The correct values for those stores are
    'file' and 'vmware'. See the Newton release notes for more
    information at
    https://docs.openstack.org/releasenotes/glance/newton.html#upgrade-notes

glance-29.0.0/releasenotes/notes/cleanout_registry_data-api-9d91368aed83497e.yaml
---
deprecations:
  - |
    This release removes endpoints and config options related to
    glance-registry, including but not limited to the config option
    'data-api', which has no production-supported options left. SimpleDB
    has not been supported since moving DB migrations to alembic, and
    the registry is removed. All registry-specific options and config
    files have been removed. The 'glance-registry' command has been
    removed.

glance-29.0.0/releasenotes/notes/cleanup-enable_v2_api-9b9b467f4ae8c3b1.yaml
---
deprecations:
  - |
    The deprecated 'enable_v2_api' config option has been removed.

glance-29.0.0/releasenotes/notes/consistent-store-names-57374b9505d530d0.yaml
---
upgrade:
  - |
    Some backend store names were inconsistent between glance and
    glance_store. This meant that operators of the VMware datastore or
    file system store were required to use store names in
    ``glance-api.conf`` that did not correspond to any valid identifier
    in glance_store. As this situation encouraged misconfiguration and
    operator unhappiness, we have made the store names consistent in the
    Newton release. What this means for you:

    * This change applies only to operators who are using multiple image
      locations

    * This change applies only to operators using the VMware datastore
      or filesystem stores

    * This change applies only to the ``store_type_preference`` option

    * *VMware datastore operators*: The old name, now **DEPRECATED**,
      was ``vmware_datastore``. The **new** name, used in both glance
      and glance_store, is ``vmware``

    * *File system store operators*: the old name, now **DEPRECATED**,
      was ``filesystem``. The **new** name, used in both glance and
      glance_store, is ``file``

    * This change is backward compatible, that is, the old names will be
      recognized by the code during the deprecation period. Support for
      the deprecated names will be removed in the **Pike** release

    * We strongly encourage operators to modify their
      ``glance-api.conf`` files immediately to use the **new** names
glance-29.0.0/releasenotes/notes/copy-existing-image-94fd0b8d24bc16a0.yaml
---
features:
  - |
    Added a new import method, ``copy-image``, which will copy an
    existing image into multiple stores.
upgrade:
  - |
    Added a new import method, ``copy-image``, which will copy an
    existing image into multiple stores. The new import method will work
    only if multiple stores are enabled in the deployment. To use this
    feature, the operator needs to include the ``copy-image`` import
    method in the ``enabled_import_methods`` configuration option.

    Note that this new internal plugin applies *only* to images imported
    via the `interoperable image import process`_.

    .. _`interoperable image import process`: https://developer.openstack.org/api-ref/image/v2/#interoperable-image-import
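To show how the ``copy-image`` method above is invoked, here is a
minimal sketch of copying an already-active image into additional
stores via the import API; the endpoint, token, image ID, and store
names are placeholder assumptions::

    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # Ask glance to copy an existing image into two more stores.
    body = {
        "method": {"name": "copy-image"},
        "stores": ["cheap-store", "fast-store"],  # assumed store ids
        "all_stores_must_succeed": False,         # tolerate partial failure
    }
    resp = requests.post(f"{glance}/v2/images/<image-id>/import",
                         headers=headers, json=body)
    assert resp.status_code == 202  # the copy happens asynchronously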
glance-29.0.0/releasenotes/notes/dalmatian-metadef-changes-272f78e019a15ff1.yaml
---
upgrade:
  - |
    The following metadata definitions have been modified in the
    Dalmatian release:

    * Added ``hw_firmware_stateless`` boolean in the
      ``OS::Compute::LibvirtImage`` namespace.

    You may upgrade these definitions using:
    ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]``

glance-29.0.0/releasenotes/notes/dalmatian-milestone-1-releasenotes-45150bc42aead80d.yaml
---
fixes:
  - |
    Bug 2065087_: glance-cache-prefetcher is not working as threadpool
    is not set

    .. _2065087: https://code.launchpad.net/bugs/2065087

glance-29.0.0/releasenotes/notes/dalmatian-milestone-2-releasenotes-a35ccfe1cbf95fce.yaml
---
fixes:
  - |
    Bug 2059829_: Install and configure (Ubuntu) in glance

    .. _2059829: https://code.launchpad.net/bugs/2059829

glance-29.0.0/releasenotes/notes/dalmatian-milestone-3-76c9712862b6c889.yaml
---
fixes:
  - |
    Bug 1636243_: Add CPU Mode Metadata Def
  - |
    Bug 2072483_: Revert image status to queued if image conversion fails
  - |
    Bug 2061947_: stores-info --detail command fails if swift store is
    enabled

    .. _1636243: https://code.launchpad.net/bugs/1636243
    .. _2072483: https://code.launchpad.net/bugs/2072483
    .. _2061947: https://code.launchpad.net/bugs/2061947

glance-29.0.0/releasenotes/notes/delete_from_store-a1d9b9bd5cf27546.yaml
---
features:
  - |
    As part of the multi-store efforts, this release introduces deletion
    from a single store. Through the new ``/v2/stores`` endpoint, the
    API user can request that an image be deleted from a single store
    instead of deleting the whole image. This feature can also be used
    to clean up store metadata in cases where the image data has for
    some reason already disappeared from the store, in which case a 410
    (Gone) HTTP response is returned.
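As an illustration of the single-store deletion endpoint above, here is
a minimal sketch; the endpoint, token, store ID, and image ID are
placeholder assumptions::

    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # Remove one copy of the image's data without deleting the image.
    resp = requests.delete(f"{glance}/v2/stores/<store-id>/<image-id>",
                           headers=headers)
    assert resp.status_code == 204  # other store copies remain intact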
glance-29.0.0/releasenotes/notes/deprecate-admin_role-2f9d33ed0785d082.yaml
---
deprecations:
  - |
    The Glance API configuration option ``admin_role`` is deprecated in
    this release and is subject to removal at the beginning of the
    Victoria development cycle, following the `OpenStack standard
    deprecation policy `_.

    What this option does is grant complete admin access to any
    authenticated user with a particular role. *This overrides any
    policy rules configured in the policy configuration file.* While
    everything will behave as expected if you are also using the default
    policy settings, this setting may cause anomalous behavior when you
    are configuring custom policies.

    Additionally, the default value of this option has been changed in
    this release. See the "Upgrade Notes" section of this document for
    more information.

    If you were previously aware of this option and were actually using
    it, we apologize for the inconvenience its removal will cause, but
    overall it will be better for everyone if policy configuration is
    confined to the policy configuration file and this backdoor is
    eliminated. The migration path is to explicitly mention the role you
    configured for this option in appropriate places in your policy
    configuration file.
upgrade:
  - |
    The default value of the Glance API configuration option
    ``admin_role`` has been changed in this release. If you were also
    using the default policy configuration, this change will not affect
    you. If you were *not* using the default policy configuration,
    please read on.

    With the previous default value, any user with the ``admin`` role
    could act in an administrative context *regardless of what your
    policy file defined as the administrative context*. And this might
    not be a problem, because usually the ``admin`` role is not assigned
    to "regular" end users. It does become a problem, however, when
    operators attempt to configure different gradations of
    administrator.

    In this release, the default value of ``admin_role`` has been
    defined as
    ``__NOT_A_ROLE_07697c71e6174332989d3d5f2a7d2e7c_NOT_A_ROLE__``. This
    effectively makes it inoperable (unless your Keystone administrator
    has actually created such a role and assigned it to someone, which
    is unlikely but possible, so you should check).

    If your local policy tests (you have some, right?) indicate that
    your Glance policies no longer function as expected, then you have
    been relying on the ``admin_role`` configuration option and need to
    revise your policy file. (A short-term fix would be to set the
    ``admin_role`` option back to ``admin``, but keep in mind that it
    *is* a short-term fix, because this configuration option is
    deprecated and subject to removal.) See the "Deprecation Notes"
    section of this document for more information.

glance-29.0.0/releasenotes/notes/deprecate-allow_additional_image_props-0e3b2f1ffa4e55e1.yaml
---
deprecations:
  - |
    The Glance API configuration option
    ``allow_additional_image_properties`` is deprecated in this release
    and is subject to removal at the beginning of the Victoria
    development cycle, following the `OpenStack standard deprecation
    policy `_.

    The migration path for operators who were using this option in its
    nondefault ``False`` setting is to set the ``image_property_quota``
    option to ``0``. Since many other OpenStack services depend upon the
    ability to read/write custom image properties, however, we suspect
    that no one has been using the option with a nondefault value.

glance-29.0.0/releasenotes/notes/deprecate-checksum-a602853403e1c4a8.yaml
---
deprecations:
  - |
    The Image ``checksum`` property contains an MD5 hash of the image
    data associated with an image. MD5 has not been considered secure
    for some time, and in order to comply with various security
    standards (for example, FIPS), an implementation of the MD5
    algorithm may not be available on glance nodes.

    The secure "multihash" image properties, ``os_hash_algo`` and
    ``os_hash_value``, have been available on images since glance
    version 17.0.0 (Rocky). Until this point, the MD5 ``checksum``
    property has been populated solely for backward compatibility. It
    is not, however, necessary for validating downloaded image data.

    Thus, we are announcing the DEPRECATION in this release of the image
    ``checksum`` property. It will remain as an image property, but
    beginning with the Victoria release, the ``checksum`` will *not* be
    populated on new images. Users should instead rely on the secure
    "multihash" to validate image downloads. The python-glanceclient,
    for example, has been using multihash validation (with an optional
    MD5 fallback) since version 2.13.0 (Rocky).
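To make the multihash validation mentioned above concrete, here is a
minimal sketch of verifying a downloaded image against
``os_hash_algo``/``os_hash_value``; the endpoint, token, and image ID
are placeholder assumptions::

    import hashlib
    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # Fetch the image record to learn the algorithm used (e.g. sha512).
    image = requests.get(f"{glance}/v2/images/<image-id>",
                         headers=headers).json()
    hasher = hashlib.new(image["os_hash_algo"])

    # Stream the image data through the hasher.
    with requests.get(f"{glance}/v2/images/<image-id>/file",
                      headers=headers, stream=True) as resp:
        for chunk in resp.iter_content(chunk_size=65536):
            hasher.update(chunk)

    # The hex digest must match the stored multihash value.
    assert hasher.hexdigest() == image["os_hash_value"]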
glance-29.0.0/releasenotes/notes/deprecate-digest_algorithm-7cab4ef4240c522f.yaml
---
deprecations:
  - |
    The ``digest_algorithm`` configuration option has been deprecated in
    this release and is subject to removal at the beginning of the F
    development cycle, following the `OpenStack standard deprecation
    policy `_. This option has had no effect since the removal of native
    SSL support.

glance-29.0.0/releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml
---
deprecations:
  - The use_user_token, admin_user, admin_password, admin_tenant_name,
    auth_url, auth_strategy and auth_region options in the [DEFAULT]
    configuration section in glance-api.conf are deprecated, and will be
    removed in the O release. See
    https://wiki.openstack.org/wiki/OSSN/OSSN-0060

glance-29.0.0/releasenotes/notes/deprecate-glance-cache-manage-c88f07d33fcc7ca5.yaml
deprecations:
  - |
    The ``glance-cache-manage`` command is deprecated in this release in
    favor of the new Cache API. It is subject to removal at the
    beginning of the Dalmatian development cycle, following the
    `OpenStack standard deprecation policy `_.

glance-29.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-5cb692fe889eb52b.yaml
---
upgrade:
  - |
    The default value of the ``[oslo_policy] policy_file`` config option
    has been changed from ``policy.json`` to ``policy.yaml``. Operators
    who are utilizing customized or previously generated static policy
    JSON files (which are not needed by default) should generate new
    policy files or convert them to YAML format. Use the
    `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON policy
    file to a YAML formatted policy file in a backward-compatible way.
deprecations:
  - |
    Use of JSON policy files was deprecated by the ``oslo.policy``
    library during the Victoria development cycle. As a result, this
    deprecation is being noted in the Wallaby cycle with an anticipated
    future removal of support by ``oslo.policy``. As such, operators
    will need to convert to YAML policy files. Please see the upgrade
    notes for details on migration of any custom policy files.

glance-29.0.0/releasenotes/notes/deprecate-location_strategy-f658e69700204bbf.yaml
---
deprecations:
  - |
    The Glance API configuration options ``location_strategy`` and
    ``store_type_preference`` are deprecated in this release and are
    subject to removal at the beginning of the Dalmatian development
    cycle, following the `OpenStack standard deprecation policy `_.

    The weighing mechanism introduced in the Bobcat development cycle
    can be used by operators who would like to prioritize certain stores
    over others.

glance-29.0.0/releasenotes/notes/deprecate-owner_is_tenant-ec8ea36a3f7e9268.yaml
---
deprecations:
  - |
    The Glance API configuration option ``owner_is_tenant`` is
    deprecated in this release and is subject to removal at the
    beginning of the 'S' development cycle, following the `OpenStack
    standard deprecation policy `_.
glance-29.0.0/releasenotes/notes/deprecate-registry-ff286df90df793f0.yaml
---
deprecations:
  - |
    The Glance Registry Service and its APIs are officially DEPRECATED
    in this release and are subject to removal at the beginning of the
    'S' development cycle, following the `OpenStack standard deprecation
    policy `_.

    For more information, see the Glance specification document
    `Actually Deprecate the Glance Registry `_.

glance-29.0.0/releasenotes/notes/deprecate-scrubber-862c38e0d65557f3.yaml
---
deprecations:
  - |
    The Glance scrubber, which is invoked by the ``glance-scrubber``
    command, is deprecated in this release and is subject to removal at
    the beginning of the 2024.2 (Dalmatian) development cycle, following
    the `OpenStack standard deprecation policy `_.

    This deprecation notice also applies to the following configuration
    options:

    * ``delayed_delete``
    * ``scrub_time``
    * ``scrub_pool_size``
    * ``wakeup_time``

glance-29.0.0/releasenotes/notes/deprecate-show-multiple-location-9890a1e961def2f6.yaml
---
prelude: >
    - Deprecate the ``show_multiple_locations`` configuration option in
      favor of the existing Role Based Access Control (RBAC) for Image
      locations which uses the ``policy.json`` file to define the
      appropriate rules.
upgrade:
  - |
    Some additional points about the ``show_multiple_locations``
    configuration option deprecation.

    * Maintaining two different ways to configure, enable and/or disable
      a feature is painful for developers and operators, so the less
      granular means of controlling this feature will be eliminated in
      the **Ocata** release.

    * For the Newton release, this option will still be honored.
      However, it is important to update the ``policy.json`` file for
      glance-api nodes. In particular, please consider updating the
      policies ``delete_image_location``, ``get_image_location`` and
      ``set_image_location`` as per your requirements. As this is an
      advanced option and prone to expose some risks, please check the
      policies to ensure security and privacy of your cloud.

    * Future releases will ignore this option and just follow the policy
      rules. It is recommended that this option is disabled for public
      endpoints and is used only internally for service-to-service
      communication.

    * As mentioned above, the same recommendation applies to the
      policy-based configuration for exposing multiple image locations.

glance-29.0.0/releasenotes/notes/deprecate-show-multiple-location-continued-646f91b21cd771f7.yaml
---
upgrade:
  - |
    The ``show_multiple_locations`` configuration option remains
    deprecated in this release, but it has not been removed. (It had
    been scheduled for removal in the Pike release.) Please keep a watch
    on the Glance release notes and the glance-specs repository to stay
    informed about developments on this issue.

    The plan is to eliminate the option and use only policies to control
    image locations access. This, however, requires some major
    refactoring. See the `draft Policy Refactor spec `_ for more
    information. There is no projected timeline for this change, as no
    one has been able to commit time to it. The Glance team would be
    happy to discuss this more with anyone interested in working on it.

    The workaround is to continue to use the ``show_multiple_locations``
    option in a dedicated "internal" Glance node that is not accessible
    to end users. We continue to recommend that image locations not be
    exposed to end users. See `OSSN-0065 `_ for more information.
glance-29.0.0/releasenotes/notes/deprecate-show-multiple-location-continued-ussuri-16e8d9d8a59da1bc.yaml
---
upgrade:
  - |
    The ``show_multiple_locations`` configuration option remains
    DEPRECATED but not removed in the Ussuri release. We continue to
    recommend that image locations not be exposed to end users. See
    `OSSN-0065 `_ for more information.

    The plan continues to be to eliminate the option and use only
    policies to control image locations access. This, however, requires
    some major refactoring, as discussed in the `draft Policy Refactor
    spec `_. Further, there is no projected timeline for this change, as
    no one has been able to commit time to it. (The Glance team would be
    happy to discuss this more with anyone interested in working on it.)
    Please keep a watch on the Glance release notes and the glance-specs
    repository to stay informed about developments on this issue.

glance-29.0.0/releasenotes/notes/deprecate-sqlite-cache-driver-1f5f67862f56e0ba.yaml
---
deprecations:
  - |
    The Glance cache driver ``sqlite`` is deprecated in this release and
    is subject to removal at the beginning of the 'E' (2025.1)
    development cycle, following the `OpenStack standard deprecation
    policy `_.

    The configuration option ``image_cache_sqlite_db``, related to the
    ``sqlite`` cache driver, is also deprecated and is subject to
    removal at the beginning of the 'E' (2025.1) development cycle.

glance-29.0.0/releasenotes/notes/deprecate-v1-api-6c7dbefb90fd8772.yaml
---
prelude: >
    - The Images (Glance) version 1 API has been DEPRECATED. Please see
      the deprecations section for more information.
deprecations:
  - With the deprecation of the Images (Glance) version 1 API in the
    Newton release, it is subject to removal on or after the Pike
    release. The configuration options specific to the Images (Glance)
    v1 API have also been deprecated and are subject to removal. An
    indirectly related configuration option, enable_v2_api, has been
    deprecated too, as it becomes redundant once the Images (Glance) v1
    API is removed. Appropriate warning messages have been set up for
    the deprecated configuration options and for when the Images
    (Glance) v1 API is enabled (being used). Operators are advised to
    deploy the Images (Glance) v2 API. The standard OpenStack
    deprecation policy will be followed for the removals.
glance-29.0.0/releasenotes/notes/deprecate-windows-support-557481e4d45912ee.yaml
---
deprecations:
  - |
    Support for running Glance on Windows operating systems has been
    deprecated because of the retirement of the Winstackers project.

glance-29.0.0/releasenotes/notes/deprecate_metadata_encryption_key_option-8c6076ca6e361f92.yaml
---
deprecations:
  - |
    The Glance API configuration option ``metadata_encryption_key`` is
    deprecated in this release and is subject to removal at the
    beginning of the 'F' (2025.2) development cycle.

    The ``metadata_encryption_key`` option and its related functionality
    do not serve the purpose of encrypting location metadata; the option
    encrypts only the location URL, and only for specific APIs. Also,
    enabling it during an upgrade may disrupt existing deployments, as
    no database upgrade script is supported or provided to encrypt
    existing location URLs. Moreover, its functionality for encrypting
    location URLs is inconsistent, which results in download failures.

glance-29.0.0/releasenotes/notes/distributed-image-import-82cff4426731beac.yaml
---
features:
  - |
    Glance now supports the ``glance-direct`` import method without
    needing shared storage common to all API workers. By telling each
    API worker the URL by which it can be reached directly (from the
    other workers), a shared staging directory can be avoided while
    still allowing users to upload their data for import. See the
    ``worker_self_reference_url`` config option for more details, as
    well as the `Interoperable Image Import `_ docs.
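For reference, the ``glance-direct`` flow that the distributed-import
note above builds on is a two-step stage-then-import sequence. Here is
a minimal sketch; the endpoint, token, image ID, and file path are
placeholder assumptions::

    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # Step 1: upload the raw bytes to the image's staging area.
    with open("disk.qcow2", "rb") as data:   # assumed local file
        requests.put(f"{glance}/v2/images/<image-id>/stage",
                     headers={**headers,
                              "Content-Type": "application/octet-stream"},
                     data=data)

    # Step 2: trigger import; with distributed import, any worker can
    # fetch the staged data from the worker that received it.
    resp = requests.post(f"{glance}/v2/images/<image-id>/import",
                         headers=headers,
                         json={"method": {"name": "glance-direct"}})
    assert resp.status_code == 202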
glance-29.0.0/releasenotes/notes/do-not-load-paste-ini-1ec473693037ee5b.yaml
---
fixes:
  - |
    The glance-api service no longer attempts to load the
    ``api-paste.ini`` file as its service config file. All config
    options should be written in service config files such as
    ``glance-api.conf``.

glance-29.0.0/releasenotes/notes/drop-py-2-7-863871c7bc047146.yaml
---
upgrade:
  - |
    Python 2.7 support has been dropped. The last release of Glance to
    support py2.7 is OpenStack Train (Glance 19.x). The minimum version
    of Python now supported by Glance is Python 3.6.

glance-29.0.0/releasenotes/notes/drop-python-3-6-and-3-7-c6f051d5b2b40329.yaml
---
upgrade:
  - |
    Python 3.6 & 3.7 support has been dropped. The minimum version of
    Python now supported is Python 3.8.

glance-29.0.0/releasenotes/notes/drop-sheepdog-b55aae84807d31d9.yaml
---
upgrade:
  - |
    The ``sheepdog`` storage backend driver was deprecated in the Train
    release and has now been removed. Any deployments still using
    Sheepdog storage will need to migrate to a different backend storage
    prior to upgrading to this release.

glance-29.0.0/releasenotes/notes/drop-support-for-sqlalchemy-migrate-4bcbe7b200697586.yaml
---
upgrade:
  - |
    The database migration engine used by Glance for database upgrades
    was changed from *SQLAlchemy Migrate* to *Alembic* in the 14.0.0
    (Ocata) release. Support for *SQLAlchemy Migrate* has now been
    removed. This means that in order to upgrade from a pre-Ocata
    release to Xena or later, you must upgrade to Wallaby or earlier
    first.

glance-29.0.0/releasenotes/notes/enable-enforce-scope-and-new-defaults-ef543183e6c2eabb.yaml
---
upgrade:
  - |
    The Glance service enables the API policies (RBAC) new defaults and
    scope by default. The default values of the config options
    ``[oslo_policy] enforce_scope`` and ``[oslo_policy]
    enforce_new_defaults`` have been changed to ``True``. If you want to
    disable them, then modify the below config options' values in the
    ``glance-api.conf`` file::

        [oslo_policy]
        enforce_new_defaults=False
        enforce_scope=False

glance-29.0.0/releasenotes/notes/exp-emc-mig-fix-a7e28d547ac38f9e.yaml
---
fixes:
  - |
    There was a bug in the **experimental** zero-downtime database
    upgrade path introduced in the Ocata release that prevented the
    **experimental** upgrade from working. This has been fixed in the
    Pike release. The bug did not affect the normal database upgrade
    operation.

glance-29.0.0/releasenotes/notes/expanding-stores-details-d3aa8ebb76ad68d9.yaml
---
features:
  - |
    This release brings an expansion of the functionality of the
    stores-detail API. The stores-detail API will list the way each
    store is configured, whereas previously this worked only for the
    rbd store. The API remains admin-only by default as it exposes
    backend information.

glance-29.0.0/releasenotes/notes/experimental-multi-store-d2c26f9dbb9c835b.yaml
---
features:
  - |
    This release provides an EXPERIMENTAL implementation of the Glance
    spec `Multi-Store Backend Support `_, which allows an operator to
    configure multiple backing stores so that end users may direct image
    data to be stored in a specific backend. See `Multi Store Support `_
    in the Glance Administration Guide for more information.

    This experimental feature is optionally exposed as the EXPERIMENTAL
    Image Service API version 2.8. Its use in production systems is
    currently **not supported**. We encourage people to use this feature
    for testing purposes and report any issues so that it can be made
    stable and fully supported in the Stein release.
other:
  - |
    The `Multi-Store Backend Support `_ feature is introduced on an
    experimental basis in the EXPERIMENTAL Image Service API version
    2.8:

    * a new *list stores* call, `GET /v2/info/stores `_

    * a new ``OpenStack-image-store-ids`` header in the `create image `_
      response

    * an ``X-Image-Meta-Store`` header may be included with the `image
      data upload `_ request

    * an ``X-Image-Meta-Store`` header may be included with the `image
      import `_ request

    Please keep in mind that as version 2.8 of the Image Service API is
    EXPERIMENTAL, we reserve the right to make modifications to these
    aspects of the API should user feedback indicate that a change is
    required.
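To illustrate the 2.8 multi-store calls listed above, here is a minimal
sketch of discovering the configured stores and directing an upload at
a specific one; the endpoint, token, file path, and IDs are placeholder
assumptions::

    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # Discover which stores this deployment offers.
    stores = requests.get(f"{glance}/v2/info/stores",
                          headers=headers).json()["stores"]
    print([s["id"] for s in stores])

    # Direct the image data at one particular backend via the new header.
    with open("disk.qcow2", "rb") as data:   # assumed local file
        requests.put(f"{glance}/v2/images/<image-id>/file",
                     headers={**headers,
                              "Content-Type": "application/octet-stream",
                              "X-Image-Meta-Store": "<store-id>"},
                     data=data)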
glance-29.0.0/releasenotes/notes/fix-md-tag-create-multiple-c04756cf5155983d.yaml
---
features:
  - |
    A new optional header, ``X-Openstack-Append``, has been added to
    append new metadef tags to the existing tags. If the header is
    present, the new tags will be appended to the existing ones; if not,
    the call defaults to the old behaviour, i.e. overwriting the
    existing tags with the new ones.

glance-29.0.0/releasenotes/notes/fix-set-acls-bc17b5e125425c9b.yaml
---
fixes:
  - |
    `Bug #2073945 <https://bugs.launchpad.net/glance/+bug/2073945>`_:
    Fixed issue with VM creation in DCN cases with the RBD backend,
    where an edge node doesn't have the store defined which is part of
    the image locations and the operation fails.

glance-29.0.0/releasenotes/notes/fix_1889640-95d543629d7dadce.yaml
---
fixes:
  - |
    Bug 1889640_: Image import might result in an 'active' image with no
    data.

    .. _1889640: https://bugs.launchpad.net/glance/+bug/1889640

glance-29.0.0/releasenotes/notes/fix_1889676-f8d302fd240c8a57.yaml
---
fixes:
  - |
    Bug 1889676_: "stores" can be set as a property, breaking the
    multistore indication of stores where the images are present

    .. _1889676: https://bugs.launchpad.net/glance/+bug/1889676

glance-29.0.0/releasenotes/notes/fix_httpd_docs-3efff0395f96a94d.yaml
---
fixes:
  - |
    Bug 1887994_: Mixed message in admin docs to deploy under httpd

    .. _1887994: https://bugs.launchpad.net/glance/+bug/1887994
glance-29.0.0/releasenotes/notes/glance-unified-quotas-fba62fabb00379af.yaml
---
features:
  - |
    Glance now has per-tenant quota support based on Keystone unified
    limits for resources like image and staging storage, among other
    things. For more information about how to configure and use these
    quotas, refer to the relevant section of the `Administrator Guide `_.

glance-29.0.0/releasenotes/notes/glare-ectomy-72a1f80f306f2e3b.yaml
---
upgrade:
  - |
    Code for the OpenStack Artifacts Service (`Glare`_) and its
    EXPERIMENTAL API has been removed from the Glance codebase, as it
    was relocated into an independent `Glare`_ project repository during
    a previous release cycle. The database upgrade for the Glance Pike
    release drops the Glare tables (named 'artifacts' and 'artifact_*')
    from the Glance database.

    OpenStack deployments, packagers, and deployment projects which
    provided Glare should have begun to consume Glare from its own
    `Glare`_ repository during the Newton and Ocata releases. With the
    Pike release, it is no longer possible to consume Glare code from
    the Glance repository.

    .. _`Glare`: https://git.openstack.org/cgit/openstack/glare
other:
  - |
    Code for the OpenStack Artifacts Service (Glare) and its
    EXPERIMENTAL API has been `removed`_ from the Glance codebase.

    The Artifacts API was an EXPERIMENTAL API that ran on the Glance
    service endpoint as ``/v3`` in the Liberty release. In the Mitaka
    release, the Glance ``/v3`` EXPERIMENTAL API was deprecated and the
    Artifacts Service ran on its own endpoint (completely independent
    from the Glance service endpoint) as an EXPERIMENTAL API, versioned
    as ``v0.1``. In both the Liberty and Mitaka releases, Glare ran on
    code stored in the Glance code repository and used its own tables in
    the Glance database.

    In the Newton release, the Glare code was relocated into its own
    `Glare`_ project repository. Also in the Newton release, Glare ran
    an EXPERIMENTAL Artifacts API versioned as ``v1.0`` on its own
    endpoint and used its own database.

    For the Pike release, the legacy Glare code has been removed from
    the Glance code repository and the legacy 'artifacts' and
    'artifact_*' database tables are dropped from the Glance database.
    As the Artifacts service API was an EXPERIMENTAL API in Glance and
    has not used the Glance database since Mitaka, no provision is made
    for migrating data from the Glance database to the Glare database.

    .. _`removed`: http://specs.openstack.org/openstack/glance-specs/specs/mitaka/implemented/deprecate-v3-api.html

glance-29.0.0/releasenotes/notes/image-conversion-plugin-5aee45e1a1a5bb2b.yaml
---
features:
  - |
    Automatic image conversion plugin for Interoperable Image Import.
    With this release, operators can specify a target image format and
    get all images created via the Image Import methods introduced in
    the Images API v2.6 converted automatically to that format.

    The feature uses qemu-img under the hood, which limits the source
    image formats that users can upload. Any image that fails the
    conversion when this plugin is enabled will fail the image creation.
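The conversion step described above is, in essence, a qemu-img
invocation. Here is an illustrative sketch, not the plugin's actual
code, of the kind of command it performs; the file paths and formats
are assumptions::

    import subprocess

    def convert_image(source, source_format, target, target_format="raw"):
        """Convert an image the way the conversion plugin does
        conceptually: by shelling out to qemu-img. Raises
        CalledProcessError on failure, which in the plugin's case fails
        the image creation."""
        subprocess.run(
            ["qemu-img", "convert", "-f", source_format,
             "-O", target_format, source, target],
            check=True,
        )

    convert_image("upload.qcow2", "qcow2", "converted.raw")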
glance-29.0.0/releasenotes/notes/image-not-found-policy-override-removed-52616c483a270bcf.yaml
---
deprecations:
  - |
    The policy check that we ran when an image is not found has been
    removed. This previously allowed an operator to override the
    behavior of a 404 to be a 403, in contrast to the API documentation
    and design goals of defaulting to 404 for information-hiding
    reasons. This check is no longer run in the case of a NotFound
    result from the database, so any policy attempting to control that
    behavior will be ignored from now on.

glance-29.0.0/releasenotes/notes/image-tasks-api-f21b42eab91c2079.yaml
---
features:
  - |
    While fixing a race condition issue during Victoria, we started
    updating the 'message' property of the task, which makes it possible
    to calculate time based on the last update time of the task in order
    to burst the lock, as well as to show how much data of that image
    has been copied. As the glance task APIs are restricted from use by
    normal users, we are adding a new API,
    ``/v2/images/{image_id}/tasks``, which will return all tasks
    associated with that image. In addition to task information, this
    API will also return ``request-id`` and ``user-id`` to help users in
    debugging.
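As a quick illustration of the new per-image tasks call above, here is
a minimal sketch; the endpoint, token, image ID, and the exact response
field names are assumptions for illustration::

    import requests

    glance = "http://controller:9292"        # assumed endpoint
    headers = {"X-Auth-Token": "<token>"}    # assumed credentials

    # List the tasks (e.g. import attempts) associated with one image.
    resp = requests.get(f"{glance}/v2/images/<image-id>/tasks",
                        headers=headers)
    for task in resp.json()["tasks"]:
        # Each entry carries task status plus the request and user
        # identifiers mentioned in the note above (names assumed).
        print(task["id"], task["status"],
              task.get("request_id"), task.get("user_id"))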
If a user wants an image to be private and not accept any members, a visibility of 'private' can be explicitly assigned at the time of creation. - Such an image will require its visibility to be updated to 'shared' before it will accept members. - | Image visibility is changed using the image update (PATCH) call. * Note: This is not a change. It's simply mentioned for completeness. - | A new value for the Image 'visibility' field, 'community', is introduced. * An image with 'community' visibility is available for consumption by any user. * In order to prevent users from spamming other users' image-list response, community images are not included in the image-list response unless specifically requested by a user. - For example, ``GET v2/images?visibility=community`` - As is standard behavior for the image-list call, other filters may be applied to the request. For example, to see the community images supplied by user ``931efe8a-0ad7-4610-9116-c199f8807cda``, the following call would be made: ``GET v2/images?visibility=community&owner=931efe8a-0ad7-4610-9116-c199f8807cda`` upgrade: - | A new value for the Image 'visibility' field, 'community', is introduced. * The ability to update an image to have 'community' visibility is governed by a policy target named 'communitize_image'. The default is empty, that is, any user may communitize an image. - | Visibility migration of current images * Prior to Ocata, the Glance database did not have a 'visibility' column, but instead used a boolean 'is_public' column, which was translated into 'public' or 'private' visibility in the Images API v2 image response. As part of the upgrade to Ocata, a 'visibility' column is introduced into the images table. It will be populated as follows: - All images currently with 'public' visibility (that is, images for which 'is_public' is True in the database) will have their visibility set to 'public'. - Images currently with 'private' visibility (that is, images for which 'is_public' is False in the database) **and** that have image members, will have their visibility set to 'shared'. - Those images currently with 'private' visibility (that is, images for which 'is_public' is False in the database) and that have **no** image members, will have their visibility set to 'private'. * Note that such images will have to have their visibility updated to 'shared' before they will accept members. - | Impact of the Ocata visibility changes on end users of the Images API v2 * We have tried to minimize the impact upon end users, but want to point out some issues to be aware of. - The migration of image visibility assigns sensible values to images, namely, 'private' to images that end users have *not* assigned members, and 'shared' to those images that have members at the time of the upgrade. Previously, if an end user wanted to share a private image, a member could be added directly. After the upgrade, the image will have to have its visibility changed to 'shared' before a member can be assigned. - The default value of 'shared' may seem odd, but it preserves the pre-upgrade workflow of: (1) create an image with default visibility, (2) add members to that image. Further, an image with a visibility of 'shared' that has no members is not accessible to other users, so it is functionally a private image. - The image-create operation allows a visibility to be set at the time of image creation.
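As a rough sketch of what that looks like against the Images v2 API (the endpoint, token, and image attributes below are illustrative assumptions, and the ``requests`` library stands in for any HTTP client):

.. code-block:: python

   import requests

   GLANCE = "http://glance.example.com:9292"  # assumed endpoint
   TOKEN = "gAAAA..."                         # assumed Keystone token

   # Explicitly request 'private' visibility at creation time; such an
   # image will not accept members until its visibility is changed to
   # 'shared'.
   resp = requests.post(
       f"{GLANCE}/v2/images",
       headers={"X-Auth-Token": TOKEN,
                "Content-Type": "application/json"},
       json={"name": "my-image", "disk_format": "qcow2",
             "container_format": "bare", "visibility": "private"},
   )
   resp.raise_for_status()
   print(resp.json()["visibility"])  # 'private'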
This option was probably not used much given that previously there were only two visibility values available, one of which ('public') is by default unassignable by end users. Operators may wish to update their documentation or tooling to specify a visibility value when end users create images. To summarize: * 'public' - reserved by default for images supplied by the operator for the use of all users * 'private' - the image is accessible only to its owner * 'community' - the image is available for consumption by all users * 'shared' - the image is completely accessible to the owner and available for consumption by any image members - | Impact of the Ocata visibility changes on the Images API v1 * The DEPRECATED Images API v1 does not have a concept of "visibility", and in a "pure" v1 deployment, you would not notice that anything had changed. Since, however, we hope that there aren't many of those around anymore, here's what you can expect to see if you use the Images API v1 in a "mixed" deployment. - In the v1 API, images have an ``is_public`` field (but no ``visibility`` field). Images for which ``is_public`` is True are the equivalent of images with 'public' visibility in the v2 API. Images for which ``is_public`` is false are the equivalent of v2 'shared' images if they have members, or the equivalent of v2 'private' images if they have no members. - An image that has 'community' visibility in the v2 API will have ``is_public`` == False in the v1 API. It will behave like a private image, that is, only the owner (or an admin) will have access to the image, and only the owner (or an admin) will see the image in the image-list response. - Since the default value for 'visibility' upon image creation is 'shared', an image freshly created using the v1 API can have members added to it, just as it did pre-Ocata. - If an image has a visibility of 'private' when viewed in the v2 API, then that image will not accept members in the v1 API. If a user wants to share such an image, the user can: * Use the v2 API to change the visibility of the image to 'shared'. Then it will accept members in either the v1 or v2 API. * Use the v1 API to update the image so that ``is_public`` is False. This will reset the image's visibility to 'shared', and it will now accept member operations. * Note that in either case, when dealing with an image that has 'private' visibility in the v2 API, there is a safeguard against a user unintentionally adding a member to an image and exposing data. The safeguard is that you must perform an additional image update operation in either the v1 or v2 API before you can expose it to other users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/image_decompression_plugin-5f085666aae01f29.yaml0000664000175000017500000000037100000000000027316 0ustar00zuulzuul00000000000000--- features: - | A new Interoperable Image Import plugin has been introduced to address the use case of providing compressed images, either through 'web-download' or to optimize the network utilization between the client and Glance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/immediate-caching-image-e38055575c361d32.yaml0000664000175000017500000000057100000000000026174 0ustar00zuulzuul00000000000000--- features: - | Added support to immediately start caching of an image.
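A minimal sketch of queueing an image for immediate caching, assuming the Images v2 cache endpoint (``PUT /v2/cache/{image_id}``); the endpoint URL, token, and image ID below are assumptions, not text from this note:

.. code-block:: python

   import requests

   GLANCE = "http://glance.example.com:9292"  # assumed endpoint
   TOKEN = "gAAAA..."                         # assumed Keystone token
   IMAGE_ID = "11111111-2222-3333-4444-555555555555"

   # Queue the image for caching right away instead of waiting for a
   # periodic prefetcher run.
   resp = requests.put(f"{GLANCE}/v2/cache/{IMAGE_ID}",
                       headers={"X-Auth-Token": TOKEN})
   print(resp.status_code)  # expected to be 202 once the image is queued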
upgrade: - | The periodic job to prefetch image(s) into the cache has been removed from the glance-api service. The config option ``cache_prefetcher_interval``, which was added as an interval for that periodic job, has also been removed, as image(s) will now be immediately queued for caching. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/implement-lite-spec-db-sync-check-3e2e147aec0ae82b.yaml0000664000175000017500000000203000000000000030410 0ustar00zuulzuul00000000000000--- features: - | Added a new command ``glance-manage db check``, the command will allow a user to check the status of upgrades in the database. upgrade: - | Using db check In order to check the current state of your database upgrades, you may run the command ``glance-manage db check``. This will inform you of any outstanding actions you have left to take. Here is a list of possible return codes: - A return code of ``0`` means you are currently up to date with the latest migration script version and all ``db`` upgrades are complete. - A return code of ``3`` means that an upgrade from your current database version is available and your first step is to run ``glance-manage db expand``. - A return code of ``4`` means that the expansion stage is complete, and the next step is to run ``glance-manage db migrate``. - A return code of ``5`` means that the expansion and data migration stages are complete, and the next step is to run ``glance-manage db contract``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/import-locking-behavior-901c691f3839fe0a.yaml0000664000175000017500000000120200000000000026454 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1884596_: A change was added to the import API which provides time-based locking of an image to exclude other import operations from starting until the lock-holding task completes. The lock is based on the task that we start to do the work, and the UUID of that task is stored in the ``os_glance_import_task`` image property, which indicates who owns the lock. If the task holding the lock fails to make progress for 60 minutes, another import operation will be allowed to steal the lock and start another import operation. .. _1884596: https://bugs.launchpad.net/glance/+bug/1884596 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/import-multi-stores-3e781f2878b3134d.yaml0000664000175000017500000000204000000000000025623 0ustar00zuulzuul00000000000000--- features: - | Added the ability to import an image into multiple stores during the `interoperable image import process`_. upgrade: - | Added the ability to import an image into multiple stores during the `interoperable image import process`_. This feature will only work if multiple stores are enabled in the deployment. It introduces 3 new optional body fields to the import API path: - ``stores``: List containing the store IDs to import the image binary data to. - ``all_stores``: To import the data in all configured stores. - ``all_stores_must_succeed``: Controls whether the import has to succeed in all stores. Users can follow workflow execution with 2 new reserved properties: - ``os_glance_importing_to_stores``: list of stores that have not yet been processed. - ``os_glance_failed_import``: Each time an import in a store fails, it is added to this list. ..
_`interoperable image import process`: https://developer.openstack.org/api-ref/image/v2/#interoperable-image-import ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/improved-config-options-221c58a8c37602ba.yaml0000664000175000017500000000245100000000000026471 0ustar00zuulzuul00000000000000--- prelude: > - Improved configuration option descriptions and handling. other: - | The glance configuration options have been improved with detailed help texts, defaults for sample configuration files, explicit choices of values for operators to choose from, and a strict range defined with ``min`` and ``max`` boundaries. * It must be noted that the configuration options that take integer values now have a strict range defined with ``min`` and/or ``max`` boundaries where appropriate. * This renders the configuration options incapable of taking certain values that may have been accepted before but were actually invalid. * For example, configuration options specifying counts, where a negative value was undefined, would have still accepted the supplied negative value. Such options will no longer accept negative values. * Options where a negative value was previously defined (for example, -1 to mean unlimited) will remain unaffected by this change. * Values which do not comply with the new restrictions will prevent the service from starting. The logs will contain a message indicating the problematic configuration option and the reason why the supplied value has been rejected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/location-add-status-checks-b70db66100bc96b7.yaml0000664000175000017500000000344300000000000027112 0ustar00zuulzuul00000000000000--- prelude: > - Adding locations to a non-active or non-queued image is no longer allowed. critical: - | Attempting to set image locations to an image *not* in ``active`` or ``queued`` status will now result in an HTTP Conflict (HTTP status code 409) to the user. * Until now, no image status checks were in place while **adding** a location to it. In some circumstances, this may result in a bad user experience. It may also cause problems for a security team evaluating the condition of an image in ``deactivated`` status. * **Adding** locations is disallowed on the following image statuses - ``saving``, ``deactivated``, ``deleted``, ``pending_delete``, ``killed``. * Note that there are race conditions associated with adding a location to an image in the ``active``, ``queued``, ``saving``, or ``deactivated`` status. Because these are non-terminal image statuses, it is possible that when a user attempts to add a location, a status transition could occur that might block the **add** (or might appear to allow an add that should not be allowed). * For example, a user is not allowed to add a location to an image in ``saving`` status. Suppose a user decides to add a location anyway. It is possible that before the user's request is processed, the transmission of data being saved is completed and the image transitioned into ``active`` status, in which case the user's add location request will succeed. To the user, however, this success will appear anomalous because in most cases, an attempt to add a location to an image in ``saving`` status will fail. * We mention this so that you can be aware of this situation in your own testing.
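To make the add-location behavior above concrete, here is a minimal sketch of an add-location request and the expected conflict response for an image that is not in ``active`` or ``queued`` status; the endpoint, token, image ID, and store URL are illustrative assumptions:

.. code-block:: python

   import requests

   GLANCE = "http://glance.example.com:9292"  # assumed endpoint
   TOKEN = "gAAAA..."                         # assumed Keystone token
   IMAGE_ID = "11111111-2222-3333-4444-555555555555"

   # JSON-patch body adding a location; the rbd URL is a placeholder.
   patch = [{"op": "add", "path": "/locations/-",
             "value": {"url": "rbd://pool/image/snap", "metadata": {}}}]

   resp = requests.patch(
       f"{GLANCE}/v2/images/{IMAGE_ID}",
       headers={"X-Auth-Token": TOKEN,
                "Content-Type":
                    "application/openstack-images-v2.1-json-patch"},
       json=patch,
   )
   # 409 (Conflict) is expected when the image is in a disallowed
   # status such as 'saving' or 'deactivated'.
   print(resp.status_code)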
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/lock_path_config_option-2771feaa649e4563.yaml0000664000175000017500000000032400000000000026610 0ustar00zuulzuul00000000000000--- upgrade: - The lock_path config option from oslo.concurrency is now required for using the sql image_cache driver. If one is not specified, it will default to the image_cache_dir and emit a warning. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/make-cinder-url-compatible-with-locations-1f3e938ff7e11c7d.yaml0000664000175000017500000000062600000000000032135 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2054575 `_: Fixed the issue when cinder uploads a volume to glance in the optimized path and glance rejects the request with an invalid location. Now we convert the old location format sent by cinder into the new location format supported by multi store, hence allowing volumes to be uploaded in an optimized way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml0000664000175000017500000000133200000000000030546 0ustar00zuulzuul00000000000000--- deprecations: - The task API was added to allow users to upload images asynchronously and to give deployers more control over the upload process. Unfortunately, this API has not worked the way it was expected to. Therefore, the task API has entered a deprecation period and it is meant to be replaced by the new import API. This change makes the task API admin only by default so that it is not accidentally deployed as a public API. upgrade: - The task API is being deprecated and it has been made admin only. If deployers of Glance would like to have this API as a public one, it is necessary to change the `policy.json` file and remove `role:admin` from every `task`-related field.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/metadef-api-admin-operations-b9a2d863913b0cae.yaml0000664000175000017500000000053300000000000027477 0ustar00zuulzuul00000000000000--- security: - | The default policy for the `metadef` API has changed from "open to everyone" to "only admins can create and modify resources". We believe that this is by far the most common use case and the only sane default. See Bug 1916926_ for more details. .. _1916926: https://bugs.launchpad.net/glance/+bug/1916926/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/multihash-081466a98601da20.yaml0000664000175000017500000000546300000000000023557 0ustar00zuulzuul00000000000000--- features: - | This release implements the Glance spec `Secure Hash Algorithm Support `_ (also known as "multihash"). This feature supplements the current 'checksum' image property with a self-describing secure hash. The self-description consists of two new image properties: * ``os_hash_algo`` - this contains the name of the secure hash algorithm used to generate the value on this image * ``os_hash_value`` - this is the hexdigest computed by applying the secure hash algorithm named in the ``os_hash_algo`` property to the image data These are read-only image properties and are not user-modifiable.
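Because the two properties are self-describing, a client can verify a downloaded image without knowing in advance which algorithm the operator configured. A minimal verification sketch (the endpoint, token, and image ID are illustrative assumptions):

.. code-block:: python

   import hashlib
   import requests

   GLANCE = "http://glance.example.com:9292"  # assumed endpoint
   TOKEN = "gAAAA..."                         # assumed Keystone token
   IMAGE_ID = "11111111-2222-3333-4444-555555555555"

   meta = requests.get(f"{GLANCE}/v2/images/{IMAGE_ID}",
                       headers={"X-Auth-Token": TOKEN}).json()

   # os_hash_algo names a hashlib algorithm, e.g. 'sha512'.
   hasher = hashlib.new(meta["os_hash_algo"])
   with requests.get(f"{GLANCE}/v2/images/{IMAGE_ID}/file",
                     headers={"X-Auth-Token": TOKEN},
                     stream=True) as resp:
       for chunk in resp.iter_content(chunk_size=65536):
           hasher.update(chunk)

   assert hasher.hexdigest() == meta["os_hash_value"]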
The secure hash algorithm used is an operator-configurable setting. See the help text for 'hashing_algorithm' in the sample Glance configuration file for more information. The default secure hash algorithm is SHA-512. It should be suitable for most applications. The legacy 'checksum' image property, which provides an MD5 message digest of the image data, is preserved for backward compatibility. issues: - | The ``os_hash_value`` image property, introduced as part of the `Secure Hash Algorithm Support `_ ("multihash") feature, is limited to 128 characters. This is sufficient to store 512 bits as a hexadecimal numeral. - | The "multihash" implemented in this release (`Secure Hash Algorithm Support `_) is computed only for new images. There is no provision for computing the multihash for existing images. Thus, users should expect to see JSON 'null' values for the ``os_hash_algo`` and ``os_hash_value`` image properties on images created prior to the installation of the Rocky release at your site. security: - | This release implements the Glance spec `Secure Hash Algorithm Support `_, which introduces a self-describing "multihash" to the image-show response. This feature supplements the current 'checksum' image property with a self-describing secure hash. The default hashing algorithm is SHA-512, which is currently considered secure. In the event that algorithm is compromised, you will immediately be able to begin using a different algorithm (as long as it's supported by the Python 'hashlib' library and has output that fits in 128 characters) by modifying the value of the 'hashing_algorithm' configuration option and either restarting or issuing a SIGHUP to Glance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/mutistore-support-for-scrubber-6b360394ef32774a.yaml0000664000175000017500000000135500000000000030006 0ustar00zuulzuul00000000000000--- features: - | The glance-scrubber utility is now multistore aware. If you are using the multistore feature, you must define configuration options for ``os_glance_tasks_store`` and ``os_glance_staging_store`` in the ``glance-scrubber.conf`` file. See the "Reserved Stores" section of the "Multi Store Support" chapter of the Glance Administration Guide for more information. upgrade: - | If you are using the multistore feature, you must define configuration options for ``os_glance_tasks_store`` and ``os_glance_staging_store`` in the ``glance-scrubber.conf`` file. See the "Reserved Stores" section of the "Multi Store Support" chapter of the Glance Administration Guide for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml0000664000175000017500000000144500000000000025512 0ustar00zuulzuul00000000000000--- features: - Implement the ability to filter images by the properties `id`, `name`, `status`, `container_format`, `disk_format` using the 'in' operator between the values. Following the pattern of existing filters, new filters are specified as query parameters using the field to filter as the key and the filter criteria as the value in the parameter. Filtering requires a full match of the supplied value; for example, 'name=in:deb' does not match 'debian'.
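A minimal request sketch for the 'in' operator (the endpoint and token are illustrative assumptions; the raw query string is ``?name=in:name1,name2,name3``):

.. code-block:: python

   import requests

   GLANCE = "http://glance.example.com:9292"  # assumed endpoint
   TOKEN = "gAAAA..."                         # assumed Keystone token

   # Match images whose name is exactly 'name1', 'name2', or 'name3';
   # partial matches such as 'name1-extra' are not returned.
   resp = requests.get(f"{GLANCE}/v2/images",
                       headers={"X-Auth-Token": TOKEN},
                       params={"name": "in:name1,name2,name3"})
   for image in resp.json()["images"]:
       print(image["id"], image["name"])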
These changes apply exclusively to the API v2 Image entity listings. An example request using the 'in' operator for the name property is ``?name=in:name1,name2,name3``. These filters were added using syntax that conforms to the latest guidelines from the OpenStack API Working Group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/newton-1-release-065334d464f78fc5.yaml0000664000175000017500000000154700000000000024747 0ustar00zuulzuul00000000000000--- prelude: > - Glance no longer returns a 500 when 4 byte unicode characters are passed to the metadefs API. - Deprecated "sign-the-hash" approach for image signing. Old run_tests and related scripts have been removed. upgrade: - The image signature verification feature has been updated to follow the "sign-the-data" approach, which uses a signature of the image data directly. The prior deprecated "sign-the-hash" approach, which uses a signature of an MD5 hash of the image data, has been removed. security: - The initial implementation of the image signature verification feature in Glance was insecure, because it relied on an MD5 hash of the image data. More details can be found in bug 1516031. This "sign-the-hash" approach was deprecated in Mitaka, and has been removed in Newton. Related CVE-2015-8234. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/newton-bugs-06ed3727b973c271.yaml0000664000175000017500000000467600000000000024125 0ustar00zuulzuul00000000000000--- fixes: - | Here is a list of other important bugs that have been fixed (or partially fixed) along with their descriptions. * bug 1617258: Image signature base64 needs to wrap lines * bug 1612341: Add cpu thread pinning flavor metadef * bug 1609571: version negotiation api middleware was NOT up to date to include v2.3 * bug 1602081: Glance needs to use oslo.context's policy dict * bug 1599169: glance-replicator size raises object of type 'NoneType' has no len() exception when no args provided * bug 1599192: glance-replicator needs to display human-readable size * bug 1585917: member-create will raise 500 error if member-id is greater than 255 characters * bug 1598985: glance-replicator compare output should show image name in addition to image id for missing images * bug 1533949: Glance tasks missing configuration item "conversion_format" * bug 1593177: The default policy needs to be admin for safer default deployment scenarios * bug 1584076: Swift ACLs disappears on v1 Glance images * bug 1591004: Unable to download image with no checksum when cache is enabled * bug 1584415: Listing images with the created_at and updated_at filters fails if an operator is not specified * bug 1590608: Services should use http_proxy_to_wsgi middleware from oslo.middleware library * bug 1584350: etc/glance-registry.conf sample file has redundant store section * bug 1543937: db-purge fails for very large number * bug 1580848: There's no exception when import task is created without properties * bug 1585584: Glare v0.1 is unable to create public artifact draft * bug 1582304: Allow tests to run when http proxy is set * bug 1570789: Metadefs API returns 500 error when 4 byte unicode character is passed * bug 1532243: glance fails silently if a task flow can not be loaded * bug 1568894: glance_store options missing in glance-scrubber.conf and glance-cache.conf sample files * bug 1568723: secure_proxy_ssl_header not in sample configuration
files * bug 1535231: md-meta with case insensitive string has problem during creating * bug 1555275: Tags set changes on delete * bug 1558683: Versions endpoint does not support X-Forwarded-Proto * bug 1557495: Possible race conditions during status change ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/no_plugins_for_copy-image-26c0e384a368bf6a.yaml0000664000175000017500000000022700000000000027136 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1885725_: 'copy-image' import job should not run additional plugins .. _1885725: https://code.launchpad.net/bugs/1885725 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/os-glance-injection-disallowed-5dad244dfb071938.yaml0000664000175000017500000000066000000000000027757 0ustar00zuulzuul00000000000000--- upgrade: - | The ``inject_image_metadata`` task will no longer allow setting properties in the reserved ``os_glance_*`` namespace, in line with the blanket prohibition on such via the API. It has always been dangerous to do this, so no operator should have any such configuration in production. If any keys in this namespace are set, they will be dropped (and logged) during the injection process. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/os-glance-namespace-reserved-1fcb8a5fddca4e0f.yaml0000664000175000017500000000045700000000000030000 0ustar00zuulzuul00000000000000--- upgrade: - | Glance now prevents setting or modifying image properties that are within the ``os_glance`` reserved namespace. Previously, individual properties (such as ``os_glance_importing_to_stores``) were inconsistently disallowed, but now the entire namespace is enforced. (A request sketch illustrating the rejection appears after the notes below.) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/oslo-log-use-stderr-changes-07f5daf3e6abdcd6.yaml0000664000175000017500000000143400000000000027535 0ustar00zuulzuul00000000000000--- upgrade: - A recent change to oslo.log (>= 3.17.0) set the default value of ``[DEFAULT]/use_stderr`` to ``False`` in order to prevent duplication of logs (as reported in bug \#1588051). Since this would change the current behaviour of certain glance commands (e.g., glance-replicator, glance-cache-manage, etc.), we chose to override the default value of ``use_stderr`` to ``True`` in those commands. We also chose not to override that value in any Glance service (e.g., glance-api, glance-registry) so that duplicate logs are not created by those services. Operators that have a use case that relies on logs being reported on standard error may set ``[DEFAULT]/use_stderr = True`` in the appropriate service's configuration file upon deployment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/pending-delete-rollback-444ff94c0056bbdb.yaml0000664000175000017500000000046700000000000026537 0ustar00zuulzuul00000000000000--- features: - | ``glance-scrubber`` now supports restoring an image's status from `pending_delete` to `active`. The usage is `glance-scrubber --restore <image_id>`. Please make sure the ``glance-scrubber`` daemon is stopped before restoring the image to avoid image data inconsistency.
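As referenced above, here is a sketch of the kind of request that the ``os_glance`` namespace enforcement rejects; the endpoint, token, image ID, and property name are illustrative assumptions, and the exact error code is determined by the API:

.. code-block:: python

   import requests

   GLANCE = "http://glance.example.com:9292"  # assumed endpoint
   TOKEN = "gAAAA..."                         # assumed Keystone token
   IMAGE_ID = "11111111-2222-3333-4444-555555555555"

   # Attempt to set a property in the reserved namespace; the property
   # name here is a hypothetical example.
   patch = [{"op": "add", "path": "/os_glance_custom", "value": "x"}]

   resp = requests.patch(
       f"{GLANCE}/v2/images/{IMAGE_ID}",
       headers={"X-Auth-Token": TOKEN,
                "Content-Type":
                    "application/openstack-images-v2.1-json-patch"},
       json=patch,
   )
   print(resp.status_code)  # a 4xx rejection, not a successful update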
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/pike-metadefs-changes-95b54e0bf8bbefd6.yaml0000664000175000017500000000111200000000000026350 0ustar00zuulzuul00000000000000--- upgrade: - | The following metadata definitions have been modified in the Pike release: * The property ``img_hide_hypervisor_id`` has been added to the namespace ``OS::Compute::LibvirtImage``. * Several `new values`_ were added for the ``vmware_ostype`` property in the ``OS::Compute::VMware`` namespace. You may upgrade these definitions using: ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]`` .. _`new values`: https://git.openstack.org/cgit/openstack/glance/commit/?id=b505ede170837c50db41a71b46075d4b211c8a48././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/pike-rc-1-a5d3f6e8877b52c6.yaml0000664000175000017500000000475700000000000023521 0ustar00zuulzuul00000000000000--- features: - | The image-list call to the Images v2 API now recognizes a ``protected`` query-string parameter. This parameter accepts only two values: either ``true`` or ``false``. The filter is case-sensitive. Any other value will result in a 400 response to the request. See the `protected filter specification`_ document for details. .. _`protected filter specification`: https://specs.openstack.org/openstack/glance-specs/specs/pike/implemented/glance/add-protected-filter.html upgrade: - | You may set the ``timeout`` option in the ``keystone_authtoken`` group in the **glance-api.conf** file. fixes: - | The following are some highlights of the bug fixes included in this release. * Bug 1655727_: Invoke monkey_patching early enough for eventlet 0.20.1 * Bug 1657459_: Fix incompatibilities with WebOb 1.7 * Bug 1554412_: Provide user friendly message for FK failure * Bug 1664709_: Do not serve partial image download requests from cache * Bug 1482129_: Remove duplicate key from dictionary * Bug 1229823_: Handle file delete races in image cache * Bug 1686488_: Fix glance image-download error * Bug 1516706_: Prevent v1_api from making requests to v2_registry * Bug 1701346_: Fix trust auth mechanism .. _1655727: https://code.launchpad.net/bugs/1655727 .. _1657459: https://code.launchpad.net/bugs/1657459 .. _1554412: https://code.launchpad.net/bugs/1554412 .. _1664709: https://code.launchpad.net/bugs/1664709 .. _1482129: https://code.launchpad.net/bugs/1482129 .. _1229823: https://code.launchpad.net/bugs/1229823 .. _1686488: https://code.launchpad.net/bugs/1686488 .. _1516706: https://code.launchpad.net/bugs/1516706 .. _1701346: https://code.launchpad.net/bugs/1701346 other: - | The `documentation was reorganized`_ in accord with the new standard layout for OpenStack projects. - | Glance now uses the `python 'cryptography' module`_ instead of the 'pycrypto' module. - | In accord with current OpenStack policy, Glance log messages are `no longer translated`_. .. _`documentation was reorganized`: http://specs.openstack.org/openstack/docs-specs/specs/pike/os-manuals-migration.html .. _`python 'cryptography' module`: https://git.openstack.org/cgit/openstack/glance/commit/?id=5ebde9079b34544cc6642a73b40ec865bcef8580 ..
_`no longer translated`: https://git.openstack.org/cgit/openstack/glance/commit/?id=87a56ce5c78952c5cccf8c6c280ec1e9a60b0b6c ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/pike-rc-2-acc173005045e16a.yaml0000664000175000017500000000761100000000000023373 0ustar00zuulzuul00000000000000--- features: - | A new policy, ``tasks_api_access``, has been introduced so that ordinary user credentials may be used by Glance to manage the tasks that accomplish the interoperable image import process without requiring that operators expose the Tasks API to end users. upgrade: - | If you wish to enable the EXPERIMENTAL version 2.6 API that contains the new interoperable image import functionality, set the configuration option ``enable_image_import`` to True in the glance-api.conf file. The default value for this option is False. The interoperable image import functionality uses the Glance tasks engine. This is transparent to end users, as they do *not* use the Tasks API for the interoperable image import workflow. The operator, however, must make sure that the following configuration options are set correctly. - ``enable_image_import`` - ``node_staging_uri`` - the options in the ``[task]`` group - the options in the ``[taskflow_executor]`` group See the documentation in the sample glance-api.conf file for more information. Additionally, you will need to verify that the task-related policies in the Glance policy.json file are set correctly. These settings are described below. - | A new policy, ``tasks_api_access``, has been introduced so that ordinary user credentials may be used by Glance to manage the tasks that accomplish the interoperable image import process without requiring that operators expose the Tasks API to end users. The `Tasks API`_ was made admin-only by default in Mitaka by restricting the following policy targets to **role:admin**: **get_task**, **get_tasks**, **add_task**, and **modify_task**. The new ``tasks_api_access`` policy target directly controls access to the Tasks API, whereas the targets just mentioned indirectly affect what can be manipulated via the API by controlling what operations can be performed on Glance's internal task objects. The key point is that if you want to expose the new interoperable image import process to end users while keeping the Tasks API admin-only, you can accomplish this by using the following settings: .. code-block:: none "get_task": "", "get_tasks": "", "add_task": "", "modify_task": "", "tasks_api_access": "role:admin", To summarize: end users do **not** need access to the Tasks API in order to use the new interoperable image import process. They do, however, need permission to access internal Glance task objects. We recommend that all operators adopt the policy settings just described independently of the decision whether to expose the EXPERIMENTAL version 2.6 API. .. _`Tasks API`: https://developer.openstack.org/api-ref/image/v2/index.html#tasks security: - | A new policy, ``tasks_api_access``, has been introduced so that ordinary user credentials may be used by Glance to manage the tasks that accomplish the interoperable image import process without requiring that operators expose the Tasks API to end users. This is a good time to review your Glance ``policy.json`` file to make sure that if it contains a ``default`` target, the rule is fairly restrictive ("role:admin" or "!" are good choices).
The ``default`` target is used when the policy engine cannot find the target it's looking for. This can happen when a new policy is introduced but the policy file in use is from a prior release. other: - | The Image Service API Reference has been updated with a section on the `Interoperable image import`_ process (also known as "image import refactored") and the API calls that are exposed to implement it in the EXPERIMENTAL version 2.6 of the API. .. _`Interoperable image import`: https://developer.openstack.org/api-ref/image/v2/index.html#interoperable-image-import ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/policy-in-code-7e0c6c070d32d136.yaml0000664000175000017500000000231000000000000024515 0ustar00zuulzuul00000000000000--- upgrade: - | Policy defaults are now defined in code, as they already were in other OpenStack services. After upgrading there is no need to provide a ``policy.json`` file (and you should not do so) unless you want to override the default policies, and only policies you want to override need be mentioned in the file. You should no longer rely on the ``default`` rule, and especially not the default value of the rule (which has been relaxed), to assign a non-default policy to rules not explicitly specified in the policy file. security: - | If the existing ``policy.json`` file relies on the ``default`` rule for some policies (i.e., not all policies are explicitly specified in the file), then the ``default`` rule must be explicitly set (e.g. to ``"role:admin"``) in the file. The new default value for the ``default`` rule is ``""``, whereas since the Queens release it has been ``"role:admin"`` (prior to Queens it was ``"@"``, which allows everything). After upgrading to this release, the policy file should be replaced by one that overrides only policies that need to be different from the defaults, without relying on the ``default`` rule. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/policy-in-code-implications-438449a73af2893c.yaml0000664000175000017500000000133400000000000027160 0ustar00zuulzuul00000000000000--- upgrade: - | Operators who use property protections with the ``property_protection_rule_format`` set to ``policies`` must still define the policy rules used for property protections in a policy file. The content of the file may be JSON or YAML. Additionally, we suggest that the absolute pathname of this file be set as the value of ``policy_file`` in the ``[oslo_policy]`` section of the ``glance-api.conf`` file. Be aware that if you define a policy rule for ``default`` or ``context_is_admin``, that policy rule will also be used by the policies that govern permissions to perform actions using the Images API, even if these actions are not specified in the policy file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/policy-refactor-xena-0cddb7f2d492cb3a.yaml0000664000175000017500000000131100000000000026237 0ustar00zuulzuul00000000000000--- security: - | The Xena release of Glance is a midpoint in the process of refactoring how our policies are applied to API operations. Applying policy enforcement in the API will ultimately increase the flexibility operators have over which users can perform which operations on which images, and provides a path for compliant Secure RBAC and scoped tokens.
In Xena, some policies are more flexible than they once were, allowing for more fine-grained assignment of responsibilities, but not all things are possible yet. If `enforce_secure_rbac` is not enabled, most operations still enforce the legacy hard-and-fast admin-or-owner requirements. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/queens-metadefs-changes-daf02bef18d049f4.yaml0000664000175000017500000000173200000000000026635 0ustar00zuulzuul00000000000000--- upgrade: - | The following metadata definitions have been modified in the Queens release: * The property img_linked_clone_ has been added to the namespace ``OS::Compute::VMware``. * An enumeration of values was added for the `vmware:hw_version`_ property in the ``OS::Compute::VMwareFlavor`` namespace. * Additional values were added to the enumeration for the `hw_disk_bus`_ property in the ``OS::Compute::LibvirtImage`` namespace. You may upgrade these definitions using: ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]`` .. _img_linked_clone: https://git.openstack.org/cgit/openstack/glance/commit/?id=5704ba6305b8aec380f90c3a35cbc4031f54f112 .. _`vmware:hw_version`: https://git.openstack.org/cgit/openstack/glance/commit/?id=c1a845d5532ae43248dd4b9714ffa0a403737cf7 .. _`hw_disk_bus`: https://git.openstack.org/cgit/openstack/glance/commit/?id=f8a5a4022441617aaa508e8e59f542d047ba5ba2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/queens-uwsgi-issues-4cee9e4fdf62c646.yaml0000664000175000017500000000332200000000000026123 0ustar00zuulzuul00000000000000--- issues: - | The Pike release notes pointed out that although support had been added to run Glance as a WSGI application hosted by a web server, the Glance team recommended that Glance be run in its normal standalone configuration, particularly in production environments. We renew that recommendation for the Queens release. In particular, Glance tasks (which are required for the interoperable image import functionality) do not execute when Glance is run under uWSGI (which is the OpenStack recommended way to run WSGI applications hosted by a web server). This is in addition to the chunked transfer encoding problems addressed by `Bug 1703856`_ and will be more difficult to fix. (Additionally, as far as we are aware, the fix for `Bug 1703856`_ has never been tested at scale.) Briefly, Glance tasks are run by the API service and would have to be split out into a different service so that the API alone would run under uWSGI. The Glance project team did not have sufficient testing and development resources during the Queens cycle to attempt this (or even to discuss whether this is in fact a good idea). The Glance project team is committed to the stability of Glance. As part of OpenStack, we are committed to `The Four Opens`_. If the ability to run Glance under uWSGI is important to you, feel free to participate in the Glance community to help coordinate and drive such an effort. (We gently remind you that "participation" includes providing testing and development resources.) .. _`Bug 1703856`: https://bugs.launchpad.net/glance/+bug/1703856 ..
_`The Four Opens`: https://governance.openstack.org/tc/reference/opens.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/range-header-request-83cf11eebf865fb1.yaml0000664000175000017500000000110600000000000026152 0ustar00zuulzuul00000000000000--- fixes: - | Glance had been accepting the Content-Range header for GET v2/images/{image_id}/file requests, contrary to RFC 7233. Following RFC 7233, Glance will now: * Accept the Range header in requests to serve partial images. * Include a ``Content-Range`` header upon successful delivery of the requested partial content. Please note that not all Glance storage backends support partial downloads. A Range request to a Glance server with such a backend will result in the entire image content being delivered despite the 206 response code. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/rbac-updates-ba0fcb886fe4085c.yaml0000664000175000017500000000211200000000000024511 0ustar00zuulzuul00000000000000--- features: - | The Glance policies have been modified to drop the system scope. Every API policy is scoped to project. This means that system-scoped users will get a 403 (permission denied) error. Also, the project reader role is ready to use. Users with the reader role can perform only read-only operations within their project. This role can be used for audit purposes. For details on what changed from the existing policy, please refer to the `RBAC new guidelines`_. We have implemented only phase-1 of the `RBAC new guidelines`_. Currently, scope checks and new defaults are disabled by default. You can enable them by switching the below config option in ``glance.conf`` file:: [oslo_policy] enforce_new_defaults=True enforce_scope=True We recommend enabling both scope checks and the new defaults together; otherwise, you may experience some late failures with unclear error messages. .. _`RBAC new guidelines`: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-admin_role-f508754e98331fc4.yaml0000664000175000017500000000032300000000000025257 0ustar00zuulzuul00000000000000--- upgrade: - | The Glance API configuration option ``admin_role``, having been deprecated in the Ussuri release, is now removed. If present in a configuration file, it will be silently ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-allow_additional_image_properties-ae33902e7967661f.yaml0000664000175000017500000000022700000000000032074 0ustar00zuulzuul00000000000000--- upgrade: - The ``allow_additional_image_properties`` configuration option, which was deprecated in Ussuri, has been removed in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-db-downgrade-0d1cc45b97605775.yaml0000664000175000017500000000067000000000000025477 0ustar00zuulzuul00000000000000--- prelude: > - Database downgrades have been removed from the Glance source tree. upgrade: - The ``db_downgrade`` command has been removed from the ``glance-manage`` utility and all database downgrade scripts have been removed.
In accord with OpenStack policy, Glance cannot be downgraded any more. Operators are advised to make a full database backup of their production data before attempting any upgrade. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-enforce-secure-rbac-ec9a0249870460c2.yaml0000664000175000017500000000115100000000000026742 0ustar00zuulzuul00000000000000--- upgrade: - | As per the revised SRBAC community goals, the glance service is switching to new defaults by default in the Antelope cycle, hence removing the deprecated ``enforce_secure_rbac`` option which is no longer needed. The ``enforce_secure_rbac`` option was introduced EXPERIMENTAL in the Wallaby release for operators to opt into enforcing authorization based on common RBAC personas. Now operators can control the scope and new defaults flag with the below config options in ``glance-api.conf`` file:: [oslo_policy] enforce_new_defaults=True enforce_scope=True././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml0000664000175000017500000000130700000000000031217 0ustar00zuulzuul00000000000000--- deprecations: - OSprofiler support requires passing of trace information between various OpenStack services. This information is signed by one of the HMAC keys, which we historically defined in the glance-api-paste.ini and glance-registry-paste.ini files (together with the enabled option, which in fact was duplicated in the corresponding configuration files). OSprofiler 0.3.1 and higher supports passing this information via configuration files, therefore it's recommended to modify the ``[filter:osprofiler]`` section in \*-paste.ini to look like ``paste.filter_factory = osprofiler.web:WsgiMiddleware.factory`` and set the ``hmac_keys`` option in the glance-\*.conf files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-owner_is_tenant-b30150def293effc.yaml0000664000175000017500000000075100000000000026630 0ustar00zuulzuul00000000000000--- upgrade: - The ``owner_is_tenant`` configuration option, which was deprecated in Rocky, has been removed in this release. As announced in the spec `Deprecate owner_is_tenant `_, given that an operator survey indicated that this option was only used in its default value of ``True``, no database migration is included in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove-s3-driver-639c60b71761eb6f.yaml0000664000175000017500000000075200000000000025043 0ustar00zuulzuul00000000000000--- prelude: > - The ``s3`` store driver has been removed. upgrade: - The latest release of the glance_store library does not have support for the ``s3`` driver. All code references to it have been removed from the library. As this release of Glance uses the updated glance_store library, you will find the ``s3`` driver support removed from Glance too. For example, the Glance image location strategy modules no longer offer the ``s3`` driver support.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove_enable_image_import_option-ec4a859ac9a7ea7b.yaml0000664000175000017500000000055000000000000031156 0ustar00zuulzuul00000000000000--- prelude: > Removed the deprecated 'enable_image_import' config option. Image import will be always enabled from this release onwards as designed. upgrade: - | As Image Import will be always enabled, care needs to be taken that it is configured properly from this release forward. The 'enable_image_import' option is silently ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove_native_ssl-c16d5a127b57583d.yaml0000664000175000017500000000143400000000000025446 0ustar00zuulzuul00000000000000--- upgrade: - | If an upgrade is conducted from PY27, where SSL connections could be terminated in glance-api, the termination needs to happen externally from now on. security: - | SSL support has been removed from Glance, as it worked only under PY27, which is no longer a supported environment. Termination of encrypted connections needs to happen externally as soon as the move to PY3 happens. Any deployment needing end-to-end encryption would need to put either a reverse proxy in front of the service (using a full-blown HTTP server like Apache or Nginx will cause a significant performance hit, and we advise using something simpler that does not break the HTTP protocol) or utilize SSL tunneling (like stunnel) between load balancers and glance-api. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/remove_secure_proxy_ssl_header-2a95ad48ffa471ad.yaml0000664000175000017500000000051100000000000030432 0ustar00zuulzuul00000000000000--- deprecations: - | Removed the deprecated 'secure_proxy_ssl_header' config option. upgrade: - | As Glance relies on oslo.middleware for this feature, care needs to be taken that it is configured properly from this release forward. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/removed-location-strategy-functionality-b1b562e68608a6f8.yaml0000664000175000017500000000020700000000000031724 0ustar00zuulzuul00000000000000--- upgrade: - The ``location_strategy`` functionality, which was deprecated in Bobcat (2023.2), has been removed in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/reordered-store-config-opts-newton-3a6575b5908c0e0f.yaml0000664000175000017500000000250200000000000030563 0ustar00zuulzuul00000000000000--- prelude: > - The sample configuration file shipped with the Glance source now has reordered store driver configuration options to ensure consistent ordering in the future. other: - | The sample configuration files autogenerated using the oslo-config-generator tool now give consistent ordering of the store drivers configurations. * Some operators have reported issues with reordering observed in the sample configurations shipped with Glance release tarballs. This reordering may result in an incorrect "diff" of the configurations used downstream vs. newly introduced upstream.
* The latest release of the ``glance_store`` library (used in the **Newton** release of Glance) will include a fix for ``glance_store`` bug 1619487. * Until now, every run of the oslo-config-generator resulted in random ordering of the store drivers configuration. After the **Newton** release this order will remain consistent. * The store drivers configuration order in the sample or autogenerated files should be expected to be alphabetical: ``cinder``, ``filesystem``, ``http``, ``rbd``, ``sheepdog``, ``swift``, ``vmware``. * Note the code name for the "ceph" driver is ``rbd``. * Note the ordering of the options within a store is not alphabetical. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/replicator-token-cleanup-4a573c86f1acccc0.yaml0000664000175000017500000000037400000000000027044 0ustar00zuulzuul00000000000000--- upgrade: - | The ``glance-replicator`` options ``mastertoken`` and ``slavetoken`` were deprecated in the Pike release cycle. These options have now been removed. The options ``sourcetoken`` and ``targettoken`` should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/restrict_location_updates-05454bb765a8c92c.yaml0000664000175000017500000000153700000000000027204 0ustar00zuulzuul00000000000000--- prelude: > Location updates for images are now restricted to images in ``active`` or ``queued`` status. Please refer to the "Bug Fixes" section for more information. fixes: - | Image location updates to an image which is not in ``active`` or ``queued`` status can introduce race conditions and security issues and hence a bad experience for users and operators. As a result, we have restricted image location updates in this release. Users will now observe the following: * HTTP Response Code 409 (Conflict) will be returned in response to an attempt to remove an image location when the image status is not ``active`` * HTTP Response Code 409 (Conflict) will be returned in response to an attempt to replace an image location when the image status is not ``active`` or ``queued`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/rethinking-filesystem-access-120bc46064b3d40a.yaml0000664000175000017500000000525700000000000027466 0ustar00zuulzuul00000000000000--- features: - | The Glance multiple stores feature, introduced on an experimental basis in Rocky and now established as a full feature in the Train release, makes it possible for Glance to use backends accessed via the glance_store library for the temporary storage of data that previously required access to the local filesystem. Please note the following: * In this release, the use of stores (instead of local directories) is optional, but it will become mandatory for the 'U' release. * In this release, the stores used *must* be the filesystem store type. Our goal is that in a future release, operators will be able to configure a store of their choice for these functions. In Train, however, each of these *must* be a filesystem store. Please see the Upgrades section of this document and the "Multi Store Support" chapter of the Glance Administration Guide for more information. upgrade: - | The configuration options ``work_dir`` and ``node_staging_uri`` are deprecated and will be removed early in the 'U' development cycle.
These local directories are used by Glance for the temporary storage of data during the interoperable image import process and by the tasks engine. This release introduces the ability to instead use a backend filesystem store accessed via the glance_store library for this temporary storage. Please note the following: * If you wish to use the backend store feature now, please see the "Reserved Stores" section of the "Multi Store Support" chapter of the Glance Administration Guide for configuration information. * If you use the Glance multiple stores feature, introduced on an experimental basis in Rocky and now fully supported in the Train release, then you *must* use backing stores instead of ``work_dir`` and ``node_staging_uri`` for Glance's temporary storage **beginning right now with the current release**. See the "Reserved Stores" section of the "Multi Store Support" chapter of the Glance Administration Guide for more information. - | The store name prefix ``os_glance_*`` is reserved by Glance for internal stores. Glance will refuse to start if a store with this prefix is included in the ``enabled_backends`` option. The internal store identifiers introduced in this release are ``os_glance_tasks_store`` and ``os_glance_staging_store``. issues: - | When using the multiple stores feature, each filesystem store **must** be configured with a different value for the ``filesystem_store_datadir`` option. This is not currently enforced in the code. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/rocky-metadefs-changes-cb00c006ff51b541.yaml0000664000175000017500000000073600000000000026305 0ustar00zuulzuul00000000000000--- upgrade: - | The following metadata definitions have been modified in the Rocky release: * There was a typographical error in the properties target for the ``OS:::Nova::Server`` resource type association in the ``CIM::ProcessorAllocationSettingData`` namespace. It has been corrected to ``scheduler_hints``. You may upgrade these definitions using: ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/rocky-rc-b0ea7628b7a74c96.yaml0000664000175000017500000000363100000000000023542 0ustar00zuulzuul00000000000000--- prelude: | This release of OpenStack Glance introduces 2 new API versions. Images API v2.7 adds support and modifications for the Hidden Images and Multihash features introduced during the Rocky cycle. Version 2.8 is included as an optional EXPERIMENTAL API for testing and preparing for multiple back-end support. The Rocky development cycle marks a long-awaited milestone in Glance's work. The Images API v1, which has been deprecated for years, is finally removed and is not available at all from Glance version 17.0.0 forward. Some security aspects were tackled for this release. Multihash, providing secure hashing for image data with future-proof options, marks the end of relying upon MD5 checksums when verifying image payloads. OSSN-0075 mitigation lessens the risk of ID reuse in those very rare cases when a database purge is necessary. When delayed delete is enabled, operators are able to recover image records if the scrubber has been stopped before the data removal interval. While the image metadata is still not preserved in these cases, this provides a way to save the image data on accidental deletes.
Due to increasing core counts on modern servers, Glance services started consuming huge amounts of resources because the default was to spin up as many workers as there are logical CPUs on the host. This automatic scaling is now capped at 8 workers, limiting the resources consumed. As each worker can handle a pool of connections, this limit should be sufficient for most clouds. Large deployments should monitor their performance after upgrade. When using the Interoperable Image Import workflow, cloud operators can now enable automatic image conversion to a desired format. When the plugin is enabled, end users have no input into its operation, and their locally computed checksums might not match the checksums recorded in Glance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/scrubber-exit-e5d77f6f1a38ffb7.yaml0000664000175000017500000000061200000000000024735 0ustar00zuulzuul00000000000000--- fixes: - | Please note a change in the Scrubber's behavior in case of job fetching errors: * If configured to work in daemon mode, the Scrubber will log an error message at level critical, but will not exit the process. * If configured to work in non-daemon mode, the Scrubber will log an error message at level critical and exit with status 1. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/scrubber-refactor-73ddbd61ebbf1e86.yaml0000664000175000017500000000073300000000000025637 0ustar00zuulzuul00000000000000others: - | The ``glance-scrubber`` utility, which is used to perform offline deletion of images when the Glance ``delayed_delete`` option is enabled, has been refactored so that it no longer uses the Glance Registry API (and hence no longer has a dependency on the Registry v1 Client). Configuration options associated with connecting to the Glance registry are no longer required, and operators may remove them from the glance-scrubber.conf file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/secure-rbac-project-personas-fb0d9792b9dc3783.yaml0000664000175000017500000000516200000000000027511 0ustar00zuulzuul00000000000000--- features: - | Glance's default policies now use the `member` role on projects to protect writeable and readable image actions. Support was also added for read-only access to image resources when the `reader` role is granted to users on a project. Administrative operations, like creating public images, are still protected using the `admin` role on a project. Administrative actions will be updated in the future to consume system-scope. We encourage you to compare any existing overrides in your deployment with the new defaults. You can use `oslopolicy-sample-generator --namespace glance` to generate the default policies and use them for comparison. The secure RBAC personas implemented in Wallaby are marked as experimental. They will become stable in a future release. You can read more about the various personas in keystone's `Administrator Guide `_. To enable this functionality, you must specify `glance-api.conf [DEFAULT] enable_secure_rbac=True` and `glance-api.conf [oslo_policy] enforce_new_defaults=True`. Glance will refuse to start if misconfigured. upgrade: - | Glance now provides more granular RBAC access to the images API via default personas.
This work is marked as experimental in Wallaby, and will be supported in a future release. Existing authorization and policies will continue to work, but we encourage operators to review the new policies and consolidate any redundant overrides with the new defaults if possible. Please review the feature section above for more details. deprecations: - | The policies protecting the image API have been deprecated in favor of more consistent defaults that use the `member` and `reader` default roles from keystone. If your deployment relies on overriding the default policies, please review the new defaults and how they may impact your deployment. The unused `modify_task` policy has been deprecated for removal. It was never honored or checked as part of an API operation, so overriding it has no direct impact on the tasks API, which remains a deprecated, admin-only API. security: - | Glance now ships experimental policies that support read-only image permissions. Users with the `reader` role on a project will be able to view generic image data, without the ability to make writeable changes using the images API. Please review the features section above for more information on enabling this functionality. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/soft_delete-tasks-43ea983695faa565.yaml0000664000175000017500000000056000000000000025356 0ustar00zuulzuul00000000000000--- prelude: > - Expired tasks are now deleted. other: - Expired tasks are now deleted in Glance. As with other Glance resources, this is a "soft" deletion, that is, a deleted task is marked as ``deleted`` in the database so that the task will not appear in API responses, but the information associated with the task persists in the database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/store-weight-3ed3ee612579bc25.yaml0000664000175000017500000000047000000000000024425 0ustar00zuulzuul00000000000000--- features: - | The "GET" images API now sorts image locations based on the store weight configured for each store in the configuration files. An image will be downloaded from the store with the highest configured weight. When all stores have the default weight, the locations remain in their insertion order. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/support-cinder-multiple-stores-eb4e6d912d549ee9.yaml0000664000175000017500000000057400000000000030231 0ustar00zuulzuul00000000000000--- features: - | Added support for cinder multiple stores. upgrade: - | During an upgrade from a single cinder store to multiple cinder stores, legacy image location URLs will be updated to the new format with respect to the volume type configured in the stores. Legacy location url: cinder://<volume-id> New location url: cinder://<store-name>/<volume-id> ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/train-metadefs-changes-c4380754cdd13a19.yaml0000664000175000017500000000144500000000000026231 0ustar00zuulzuul00000000000000--- upgrade: - | The following metadata definitions have been modified in the Train release: * Added ``hw:mem_encryption`` boolean in the ``OS::Nova::Flavor`` namespace, and ``hw_mem_encryption`` boolean in the ``OS::Glance::Image`` namespace.
* Added ``hw_pmu`` boolean, and ``hw_cdrom_bus`` and ``hw_firmware_type`` enumerations in the ``OS::Compute::LibvirtImage`` namespace. * Added ``powervm`` to the ``hypervisor_type`` enumeration in the ``OS::Compute::Hypervisor`` namespace. * Added ``virtio``, ``gop`` and ``none`` to the ``hw_video_model`` enumeration in the ``OS::Compute::LibvirtImage`` namespace. You may upgrade these definitions using: ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml0000664000175000017500000000060200000000000026666 0ustar00zuulzuul00000000000000--- features: - Implemented re-authentication with trusts when updating image status in the registry after an image upload. When a long-running image upload takes a lot of time (more than the token expiration time), Glance uses trusts to receive a new token and update the image status in the registry. This allows users to upload large images without increasing the token expiration time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/update-show_multiple_locations-helptext-7fa692642b6b6d52.yaml0000664000175000017500000000062400000000000032011 0ustar00zuulzuul00000000000000--- other: - | The deprecation path for the configuration option ``show_multiple_locations`` has been changed because the mitigation instructions for `OSSN-0065`_ refer to this option. It is now subject to removal on or after the **Pike** release. The help text for this option has been updated accordingly. .. _`OSSN-0065`: https://wiki.openstack.org/wiki/OSSN/OSSN-0065 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/use-cursive-c6b15d94845232da.yaml0000664000175000017500000000160400000000000024172 0ustar00zuulzuul00000000000000--- other: - | Glance and Nova contain nearly identical digital signature modules. In order to better maintain and evolve this code and to eliminate the possibility that the modules diverge, we have replaced the digital signature module in Glance with the new ``cursive`` library. * The ``cursive`` library is an OpenStack project which implements OpenStack-specific verification of digital signatures. * In Newton, the majority of the signature verification code was removed from Glance. ``cursive`` has been added to Glance as a dependency and will be installed by default. * Glance uses the ``cursive`` library's functionality to verify digital signatures. To familiarize yourself with this new dependency and see the list of transitive dependencies, visit http://git.openstack.org/cgit/openstack/cursive ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/use-webob-1.8.1-5c3cd1b1382f063e.yaml0000664000175000017500000000104300000000000024320 0ustar00zuulzuul00000000000000--- other: - | Negotiation of the 'Accept-Language' header now follows the "Lookup" matching scheme described in `RFC 4647, section 3.4 `_. The "Lookup" scheme is one of the algorithms suggested in `RFC 7231, section 5.3.5 `_. (This is due to a change in an underlying library, which previously used a matching scheme that did not conform to `RFC 7231 `_.)
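As a rough illustration of the "Lookup" scheme referenced in the note above, the hedged Python sketch below shows the progressive-truncation fallback that RFC 4647, section 3.4 describes; it is not the underlying library's implementation, and the sample language tags are invented::

    # Hedged sketch of RFC 4647 section 3.4 "Lookup" matching: the
    # language range is truncated from the right, one subtag at a time,
    # until a supported tag matches.  Not the actual library code.
    def lookup(range_tag, supported, default=None):
        supported = {tag.lower() for tag in supported}
        subtags = range_tag.lower().split("-")
        while subtags:
            candidate = "-".join(subtags)
            if candidate in supported:
                return candidate
            subtags.pop()
            # Per the RFC, a single-letter subtag left dangling at the
            # end is removed together with the subtag that followed it.
            if subtags and len(subtags[-1]) == 1:
                subtags.pop()
        return default

    print(lookup("zh-Hant-CN", ["zh", "en"], default="en"))  # -> "zh"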
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/ussuri-final-b377a21508ada060.yaml0000664000175000017500000000244300000000000024324 0ustar00zuulzuul00000000000000--- prelude: | The Ussuri release includes multiple important milestones in Glance development priorities. * Added support for importing images in multiple stores * Added support for copying existing images in multiple stores * Added support to delete image from single store * Dropped support for python 2.7 fixes: - | Bug 1823703_: Wrong version URL when Glance is deployed behind proxy with vhost - | Bug 1863021_: eventlet monkey patch results in assert len(_active) == 1 AssertionError - | Bug 1855708_: Reload tests broken in Py3 - | Bug 1850412_: Useful image properties in glance - os_admin_user not documented - | Bug 1863879_: Multiple import fails if all-stores 'True' is passed - | Bug 1861723_: Glance is listening on TCP socket before store initialization - | Bug 1861501_: Store ID fetched from URI is incorrectly encoded under py27 .. _1823703: https://code.launchpad.net/bugs/1823703 .. _1863021: https://code.launchpad.net/bugs/1863021 .. _1855708: https://code.launchpad.net/bugs/1855708 .. _1850412: https://code.launchpad.net/bugs/1850412 .. _1863879: https://code.launchpad.net/bugs/1863879 .. _1861723: https://code.launchpad.net/bugs/1861723 .. _1861501: https://code.launchpad.net/bugs/1861501 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/victoria-m2-release-notes-8a6ae2fdb3d29dae.yaml0000664000175000017500000000317100000000000027201 0ustar00zuulzuul00000000000000--- features: - | Added policy support to allow copying images to multiple stores, even if those images are not owned by the current user's project. fixes: - | Bug 1888349_: glance-cache-manage utility is broken - | Bug 1886374_: Improve lazy loading mechanism for multiple stores - | Bug 1885003_: Interrupted copy-image may break a subsequent operation - | Bug 1884587_: image import copy-image API should reflect proper authorization - | Bug 1876419_: Failed to parse json file /etc/glance/metadefs/compute-vmware.json - | Bug 1856581_: metadefs: OS::Glance::CommonImageProperties out of date - | Bug 1843576_: Glance metadefs is missing Image property hw_vif_multiqueue_enabled - | Bug 1856578_: docs: image schema customization restrictions - | Bug 1808814_: admin docs: interoperable image import revision for stein - | Bug 1870336_: Update 'common image properties' doc - | Bug 1888713_: Async tasks, image import not supported in pure-WSGI mode .. _1888349: https://code.launchpad.net/bugs/1888349 .. _1886374: https://code.launchpad.net/bugs/1886374 .. _1885003: https://code.launchpad.net/bugs/1885003 .. _1884587: https://code.launchpad.net/bugs/1884587 .. _1876419: https://code.launchpad.net/bugs/1876419 .. _1856581: https://code.launchpad.net/bugs/1856581 .. _1843576: https://code.launchpad.net/bugs/1843576 .. _1856578: https://code.launchpad.net/bugs/1856578 .. _1808814: https://code.launchpad.net/bugs/1808814 .. _1870336: https://code.launchpad.net/bugs/1870336 ..
_1888713: https://code.launchpad.net/bugs/1888713 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/victoria-m3-releasenotes-9209cea98a29abc4.yaml0000664000175000017500000000201100000000000026702 0ustar00zuulzuul00000000000000--- features: - | Added support to calculate virtual size of image based on disk format - | Added support for sparse image upload for filesystem and rbd driver - | Improved performance of rbd store chunk upload - | Added support to configure multiple cinder stores upgrade: - | After upgrading, deployments using the cinder backend should update their config to specify a volume type. Existing images on those backends will be updated at runtime (lazily, when they are first read) to a location URL that includes the store and volume type information. fixes: - | Bug 1891190_: test_reload() functional test causes hang and jobs TIMED_OUT - | Bug 1891352_: Failed import of one store will remain in progress forever if all_stores_must_succeed=True - | Bug 1887099_: Invalid metadefs for watchdog .. _1891190: https://code.launchpad.net/bugs/1891190 .. _1891352: https://code.launchpad.net/bugs/1891352 .. _1887099: https://code.launchpad.net/bugs/1887099 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/victoria-rc1-release-notes-d928355cf90d608d.yaml0000664000175000017500000000144000000000000027002 0ustar00zuulzuul00000000000000--- prelude: | The Victoria release includes some important milestones in Glance development priorities. * Added support to calculate virtual size of image based on disk format * Added support for sparse image upload for filesystem and rbd driver of glance_store * Improved performance of rbd store chunk upload * Fixed some important bugs around the copy-image import method and importing images to multiple stores * Added support to configure multiple cinder stores fixes: - | Bug 1795950_: Fix cleaning of web-download image import in node_staging_uri - | Bug 1895663_: Image import "web-download" doesn't check on download size .. _1795950: https://code.launchpad.net/bugs/1795950 .. _1895663: https://code.launchpad.net/bugs/1895663 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/virtuozzo-hypervisor-fada477b64ae829d.yaml0000664000175000017500000000050500000000000026452 0ustar00zuulzuul00000000000000--- upgrade: - | The metadata definition for ``hypervisor_type`` in the ``OS::Compute::Hypervisor`` namespace has been extended to include the Virtuozzo hypervisor, designated as ``vz``. You may upgrade the definition using: ``glance-manage db load_metadefs [--path <path>] [--merge] [--prefer_new]`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/wallaby-m3-releasenotes-bdc9fe6938aba8cc.yaml0000664000175000017500000000064000000000000026750 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1913625_: Glance will leak staging data - | Bug 1914826_: web-download with invalid url does not report error - | Bug 1916011_: test_migrate_image_after_upgrade failing because of glance cinder store change .. _1913625: https://code.launchpad.net/bugs/1913625 .. _1914826: https://code.launchpad.net/bugs/1914826 ..
_1916011: https://code.launchpad.net/bugs/1916011 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/windows-support-f4aae61681dba569.yaml0000664000175000017500000000010000000000000025255 0ustar00zuulzuul00000000000000--- features: - | Glance services can now run on Windows. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/wsgi-containerization-369880238a5e793d.yaml0000664000175000017500000000450100000000000026130 0ustar00zuulzuul00000000000000--- features: - | Glance is now packaged with a WSGI script entrypoint, enabling it to be run as a WSGI application hosted by a performant web server. See `Running Glance in HTTPD `_ in the Glance documentation for details. There are some limitations with this method of deploying Glance and we do not recommend its use in production environments at this time. See the `Known Issues`_ section of this document for more information. issues: - | Although support has been added for Glance to be run as a WSGI application hosted by a web server, the atypical nature of the Images APIs provided by Glance, which enable transfer of copious amounts of image data, makes it difficult for this approach to work without careful configuration. Glance relies on the use of chunked transfer encoding for image uploads, and the support of chunked transfer encoding is not required by the `WSGI specification`_. The Glance documentation section `Running Glance in HTTPD`_ outlines some approaches to use (and not to use) Glance with the Apache httpd server. This is the way Glance is configured as a WSGI application in devstack, so it's the method with which we've had the most experience. If you try deploying Glance using a different web server, please consider contributing your findings to the Glance documentation. Currently, we are experiencing some problems in the gate when Glance is configured to run in devstack following the guidelines recommended in the documentation. You can follow `Bug 1703856`_ to learn more. As far as the Glance team can determine, the difficulties running Glance as a WSGI application are caused by issues external to Glance. Thus the Glance team recommends that Glance be run in its normal standalone configuration, particularly in production environments. If you choose to run Glance as a WSGI application in a web server, be sure to test your installation carefully with realistic usage scenarios. .. _`WSGI specification`: https://www.python.org/dev/peps/pep-0333/ .. _`Running Glance in HTTPD`: https://docs.openstack.org/glance/latest/admin/apache-httpd.html .. _`Bug 1703856`: https://bugs.launchpad.net/glance/+bug/1703856 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/xena-m2-releasenotes-e68fd81ece1d514a.yaml0000664000175000017500000000063300000000000026103 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1916052_: Unable to create trust errors in glance-api - | Bug 1934673_: Policy deprecations falsely claims defaulting to role based policies - | Bug 1922928_: Image tasks API excludes in-progress tasks .. _1916052: https://code.launchpad.net/bugs/1916052 .. _1934673: https://code.launchpad.net/bugs/1934673 .. 
_1922928: https://code.launchpad.net/bugs/1922928 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/xena-m3-releasenotes-a92d55d29eecc8f6.yaml0000664000175000017500000000166000000000000026117 0ustar00zuulzuul00000000000000--- features: - | Glance's default policies for metadef APIs now support member and reader roles for the Secure RBAC project personas. Administrative operations like create, delete and update are still protected using the `admin` role on a project. Administrative actions will be updated in the future to consume system-scope. fixes: - | Bug 1936665_: Functional tests not available for metadef resource types - | Bug 1895173_: Caught error: UPDATE statement on table 'image_properties'. expected to update 1 row(s); 0 were matched - | Bug 1940090_: options of the castellan library are missing from glance-api.conf - | Bug 1885928_: Unable to spawn VM from community image .. _1936665: https://code.launchpad.net/bugs/1936665 .. _1895173: https://code.launchpad.net/bugs/1895173 .. _1940090: https://code.launchpad.net/bugs/1940090 .. _1885928: https://code.launchpad.net/bugs/1885928././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/xena-rc1-release-notes-12dbe0ac528ce483.yaml0000664000175000017500000000151500000000000026234 0ustar00zuulzuul00000000000000--- prelude: | The Xena release includes some important milestones in Glance development priorities. * Added support for unified quotas using keystone limits * Moved policy enforcement into the API layer * Implemented Secure RBAC project scope for metadef APIs * Fixed some important bugs around multi-store imports and precaching images fixes: - | Bug 1939307_: glance-uwsgi - Add missing cache prefetching periodic job - | Bug 1940733_: [oslo_reports] options are missing from the config file generated by oslo-config-generator - | Bug 1939944_: The parameters of the healthcheck middlewares are missing from glance-api.conf .. _1939307: https://code.launchpad.net/bugs/1939307 .. _1940733: https://code.launchpad.net/bugs/1940733 .. _1939944: https://code.launchpad.net/bugs/1939944 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/yoga-rc1-release-notes-153932161f52a038.yaml0000664000175000017500000000136300000000000025661 0ustar00zuulzuul00000000000000--- prelude: | The Yoga release includes some important milestones in Glance development priorities. * Added support to get quota usage information. * Added new APIs for cache related operations. * Added support to append new metadef tags rather than overwriting the existing tags. * Added support to fetch additional information about RBD store. fixes: - | Bug 1939169_: glance md-tag-create-multiple overwrites existing tags - | Bug 1953063_: Image import causes SQL type casting error on PostgreSQL - | Bug 1954321_: Python3.10 error .. _1939169: https://code.launchpad.net/bugs/1939169 .. _1953063: https://code.launchpad.net/bugs/1953063 ..
_1954321: https://code.launchpad.net/bugs/1954321././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/zed-milestone-1-592415040e67924e.yaml0000664000175000017500000000136500000000000024426 0ustar00zuulzuul00000000000000--- critical: - | Correction of API response code for PUT /v2/cache/{image_id} from HTTP 200 to HTTP 202. (`Bug 1971521 `_) fixes: - | Bug 1875629_: api-ref needs update about checksum image property - | Bug 1971176_: api-ref: cache manage needs improvements - | Bug 1973631_: List call for metadef namespaces returns 404 not found while fetching resource_types - | Bug 1939922_: Internal server error if shared member tries to stage data to image .. _1875629: https://code.launchpad.net/bugs/1875629 .. _1971176: https://code.launchpad.net/bugs/1971176 .. _1973631: https://code.launchpad.net/bugs/1973631 .. _1939922: https://code.launchpad.net/bugs/1939922 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/zed-milestone-2-a782e75cdbd8fe13.yaml0000664000175000017500000000121700000000000025066 0ustar00zuulzuul00000000000000--- fixes: - | Bug 1636243_: Add CPU Mode Metadata Def - | Bug 1973136_: glance-multistore-cinder-import is failing consistently - | Bug 1962581_: bad default value for [wsgi] /python_interpreter option - | Bug 1962480_: api-ref: versions response needs an update - | Bug 1946100_: [oslo_limit] parameters are missing from glance-api.conf .. _1636243: https://code.launchpad.net/bugs/1636243 .. _1973136: https://code.launchpad.net/bugs/1973136 .. _1962581: https://code.launchpad.net/bugs/1962581 .. _1962480: https://code.launchpad.net/bugs/1962480 .. _1946100: https://code.launchpad.net/bugs/1946100 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/notes/zed-milestone-3-3e38697ae4677a81.yaml0000664000175000017500000000117100000000000024600 0ustar00zuulzuul00000000000000--- prelude: > The Zed release includes some important milestones in Glance development priorities. * Extended the functionality of the stores-detail API * Added the glance-download internal plugin to download images from a remote Glance * Added support for immediate caching of an image * Removed dead code from the auth and policy layers fixes: - | Bug 1905672_: Non existing property protection file raises 500 Internal server error - | Bug 1982426_: Python3.11: "glance-manage" crashes .. _1905672: https://code.launchpad.net/bugs/1905672 .. _1982426: https://code.launchpad.net/bugs/1982426 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/releasenotes/source/0000775000175000017500000000000000000000000017047 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000020320 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== ..
release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020321 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020321 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020475 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000022746 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023455 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/conf.py0000664000175000017500000002133700000000000020354 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Glance Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Glance Release Notes' copyright = '2015, Glance Developers' # Release notes are version independent, no need to set version and release release = '' version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # openstackdocstheme options openstackdocs_repo_name = 'openstack/glance' openstackdocs_bug_project = 'glance' openstackdocs_auto_name = False openstackdocs_bug_tag = 'releasenotes' # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'GlanceReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'GlanceReleaseNotes.tex', 'Glance Release Notes Documentation', 'Glance Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'glancereleasenotes', 'Glance Release Notes Documentation', ['Glance Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'GlanceReleaseNotes', 'Glance Release Notes Documentation', 'Glance Developers', 'GlanceReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/index.rst0000664000175000017500000000043400000000000020711 0ustar00zuulzuul00000000000000====================== Glance Release Notes ====================== .. toctree:: :maxdepth: 1 unreleased 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/liberty.rst0000664000175000017500000000022200000000000021247 0ustar00zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7862947 glance-29.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020306 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7862947 glance-29.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021260 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023045 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000100272100000000000026101 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. #zanata msgid "" msgstr "" "Project-Id-Version: Glance Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-08-29 21:43+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-09-01 01:06+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "\"GET\" images API will now sort image locations based on store weight " "configured for each store in configuration files. Image will be downloaded " "from the store having highest weight configured. For default weight scenario " "the locations will remain same as per insertion order." msgstr "" "\"GET\" images API will now sort image locations based on store weight " "configured for each store in configuration files. Image will be downloaded " "from the store having highest weight configured. For default weight scenario " "the locations will remain same as per insertion order." 
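The weighted-location ordering described in the catalog entry above can be illustrated with a short, hedged Python sketch; the store names, weights, and location dictionaries below are invented for the example and are not Glance's internal data structures::

    # Illustrative sketch of weight-based location sorting: locations
    # are ordered by the weight configured for their backing store
    # (highest first).  Ties -- e.g. every store left at the default
    # weight of 0 -- keep their insertion order because sorted() is
    # stable.
    store_weights = {"fast-rbd": 100, "cheap-file": 10, "archive": 0}

    locations = [  # insertion order, as recorded on the image
        {"url": "file:///data/img1", "store": "cheap-file"},
        {"url": "rbd://pool/img1", "store": "fast-rbd"},
        {"url": "file:///archive/img1", "store": "archive"},
    ]

    by_weight = sorted(
        locations,
        key=lambda loc: store_weights.get(loc["store"], 0),
        reverse=True,
    )
    print([loc["store"] for loc in by_weight])
    # -> ['fast-rbd', 'cheap-file', 'archive']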
msgid "\"glance-manage\" purges all deleted rows if \"--max_rows\" equals -1" msgstr "\"glance-manage\" purges all deleted rows if \"--max_rows\" equals -1" msgid "'community' - the image is available for consumption by all users" msgstr "'community' - the image is available for consumption by all users" msgid "" "'enabled_import_methods' is ListOpt type config option. If only one method " "is enabled, the format needs to be ['method'] this is not obvious as even " "the example does not have the '[' and ']' boundaries around the default " "value." msgstr "" "'enabled_import_methods' is ListOpt type config option. If only one method " "is enabled, the format needs to be ['method'] this is not obvious as even " "the example does not have the '[' and ']' boundaries around the default " "value." msgid "'private' - the image is accessible only to its owner" msgstr "'private' - the image is accessible only to its owner" msgid "" "'public' - reserved by default for images supplied by the operator for the " "use of all users" msgstr "" "'public' - reserved by default for images supplied by the operator for the " "use of all users" msgid "" "'shared' - the image is completely accessible to the owner and available for " "consumption by any image members" msgstr "" "'shared' - the image is completely accessible to the owner and available for " "consumption by any image members" msgid "" "**Adding** locations is disallowed on the following image statuses - " "``saving``, ``deactivated``, ``deleted``, ``pending_delete``, ``killed``." msgstr "" "**Adding** locations is disallowed on the following image statuses - " "``saving``, ``deactivated``, ``deleted``, ``pending_delete``, ``killed``." msgid "" "**Experimental** zero-downtime database upgrade using an expand-migrate-" "contract series of operations is available." msgstr "" "**Experimental** zero-downtime database upgrade using an expand-migrate-" "contract series of operations is available." msgid "" "*File system store operators*: the old name, now **DEPRECATED**, was " "``filesystem``. The **new** name, used in both glance and glance_store, is " "``file``" msgstr "" "*File system store operators*: the old name, now **DEPRECATED**, was " "``filesystem``. The **new** name, used in both glance and glance_store, is " "``file``" msgid "" "*VMware datastore operators*: The old name, now **DEPRECATED**, was " "``vmware_datastore``. The **new** name, used in both glance and " "glance_store, is ``vmware``" msgstr "" "*VMware datastore operators*: The old name, now **DEPRECATED**, was " "``vmware_datastore``. 
The **new** name, used in both glance and " "glance_store, is ``vmware``" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.2" msgstr "11.0.2" msgid "12.0.0" msgstr "12.0.0" msgid "12.0.0-20" msgstr "12.0.0-20" msgid "13.0.0" msgstr "13.0.0" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "15.0.0" msgstr "15.0.0" msgid "15.0.1" msgstr "15.0.1" msgid "16.0.0" msgstr "16.0.0" msgid "16.0.1" msgstr "16.0.1" msgid "16.0.1-11" msgstr "16.0.1-11" msgid "17.0.0" msgstr "17.0.0" msgid "17.0.1" msgstr "17.0.1" msgid "18.0.0" msgstr "18.0.0" msgid "19.0.0" msgstr "19.0.0" msgid "19.0.2" msgstr "19.0.2" msgid "19.0.4" msgstr "19.0.4" msgid "20.0.0" msgstr "20.0.0" msgid "20.0.1" msgstr "20.0.1" msgid "20.1.0" msgstr "20.1.0" msgid "20.2.0" msgstr "20.2.0" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid "21.0.0" msgstr "21.0.0" msgid "21.1.0" msgstr "21.1.0" msgid "22.0.0" msgstr "22.0.0" msgid "22.1.0" msgstr "22.1.0" msgid "23.0.0" msgstr "23.0.0" msgid "23.1.0" msgstr "23.1.0" msgid "24.0.0" msgstr "24.0.0" msgid "24.1.0" msgstr "24.1.0" msgid "24.2.0" msgstr "24.2.0" msgid "25.0.0" msgstr "25.0.0" msgid "26.0.0" msgstr "26.0.0" msgid "26.1.0" msgstr "26.1.0" msgid "26.1.0-3" msgstr "26.1.0-3" msgid "27.0.0" msgstr "27.0.0" msgid "27.1.0" msgstr "27.1.0" msgid "27.1.0-4" msgstr "27.1.0-4" msgid "28.0.0" msgstr "28.0.0" msgid "28.0.1" msgstr "28.0.1" msgid "28.1.0" msgstr "28.1.0" msgid "28.1.0-7" msgstr "28.1.0-7" msgid "29.0.0.0b1" msgstr "29.0.0.0b1" msgid "29.0.0.0b2" msgstr "29.0.0.0b2" msgid "29.0.0.0b2-21" msgstr "29.0.0.0b2-21" msgid "" "A change was added to the import API which provides time-based locking of an " "image to exclude other import operations from starting until the lock-" "holding task completes (see Bug 1884596_). The lock is based on the task " "that we start to do the work, and the UUID of that task is stored in the " "``os_glance_import_task`` image property, which indicates who owns the lock. " "If the task holding the lock fails to make progress for 60 minutes, another " "import operation will be allowed to steal the lock and start another import " "operation." msgstr "" "A change was added to the import API which provides time-based locking of an " "image to exclude other import operations from starting until the lock-" "holding task completes (see Bug 1884596_). The lock is based on the task " "that we start to do the work, and the UUID of that task is stored in the " "``os_glance_import_task`` image property, which indicates who owns the lock. " "If the task holding the lock fails to make progress for 60 minutes, another " "import operation will be allowed to steal the lock and start another import " "operation." msgid "" "A new common image property, 'description', has been added. This allows you " "to specify a brief human-readable description, suitable for display in a " "user interface, on images. It has been possible to do this previously using " "a custom image property; this change simply standardizes the usage in order " "to promote interoperability. This change has no effect on any property named " "'description' on existing images, and it is not a required image property." msgstr "" "A new common image property, 'description', has been added. This allows you " "to specify a brief human-readable description, suitable for display in a " "user interface, on images. 
It has been possible to do this previously using " "a custom image property; this change simply standardises the usage in order " "to promote interoperability. This change has no effect on any property named " "'description' on existing images, and it is not a required image property." msgid "" "A new interoperable image import method, ``web-download`` is introduced." msgstr "" "A new interoperable image import method, ``web-download`` is introduced." msgid "" "A new interoperable image import method, ``web-download`` is introduced. " "This method allows an end user to import an image from a remote URL. The " "image data is retrieved from the URL and stored in the Glance backend. (In " "other words, this is a **copy-from** operation.)" msgstr "" "A new interoperable image import method, ``web-download`` is introduced. " "This method allows an end user to import an image from a remote URL. The " "image data is retrieved from the URL and stored in the Glance backend. (In " "other words, this is a **copy-from** operation.)" msgid "" "A new optional header ``X-Openstack-Append`` has been added to append the " "new metadef tags to the existing tags. If the header is present it will " "append the new tags to the existing one, if not then it will default to the " "old behaviour i.e. overwriting the existing tags with the new one." msgstr "" "A new optional header ``X-Openstack-Append`` has been added to append the " "new metadef tags to the existing tags. If the header is present it will " "append the new tags to the existing one, if not then it will default to the " "old behaviour i.e. overwriting the existing tags with the new one." msgid "" "A new policy, ``tasks_api_access`` has been introduced so that ordinary user " "credentials may be used by Glance to manage the tasks that accomplish the " "interoperable image import process without requiring that operators expose " "the Tasks API to end users." msgstr "" "A new policy, ``tasks_api_access`` has been introduced so that ordinary user " "credentials may be used by Glance to manage the tasks that accomplish the " "interoperable image import process without requiring that operators expose " "the Tasks API to end users." msgid "" "A new value for the Image 'visibility' field, 'community', is introduced." msgstr "" "A new value for the Image 'visibility' field, 'community', is introduced." msgid "" "A new value for visibility, 'shared', is introduced. Images that have or " "can accept members will no longer be displayed as having 'private' " "visibility, reducing confusion among end users." msgstr "" "A new value for visibility, 'shared', is introduced. Images that have or " "can accept members will no longer be displayed as having 'private' " "visibility, reducing confusion among end users." msgid "" "A plugin framework for customizing the processing of imported images before " "they become active is introduced in this release, along with a new plugin " "that injects image metadata properties into imported images." msgstr "" "A plugin framework for customising the processing of imported images before " "they become active is introduced in this release, along with a new plugin " "that injects image metadata properties into imported images." msgid "" "A preview of zero-downtime database upgrades is available in this release, " "but it is **experimental** and **not supported for production systems**. " "Please consult the `Database Management`_ section of the Glance " "documentation for details." 
msgstr "" "A preview of zero-downtime database upgrades is available in this release, " "but it is **experimental** and **not supported for production systems**. " "Please consult the `Database Management`_ section of the Glance " "documentation for details." msgid "" "A recent change to oslo.log (>= 3.17.0) set the default value of ``[DEFAULT]/" "use_stderr`` to ``False`` in order to prevent duplication of logs (as " "reported in bug \\#1588051). Since this would change the current behaviour " "of certain glance commands (e.g., glance-replicator, glance-cache-manage, " "etc.), we chose to override the default value of ``use_stderr`` to ``True`` " "in those commands. We also chose not to override that value in any Glance " "service (e.g., glance-api, glance-registry) so that duplicate logs are not " "created by those services. Operators that have a usecase that relies on logs " "being reported on standard error may set ``[DEFAULT]/use_stderr = True`` in " "the appropriate service's configuration file upon deployment." msgstr "" "A recent change to oslo.log (>= 3.17.0) set the default value of ``[DEFAULT]/" "use_stderr`` to ``False`` in order to prevent duplication of logs (as " "reported in bug \\#1588051). Since this would change the current behaviour " "of certain glance commands (e.g., glance-replicator, glance-cache-manage, " "etc.), we chose to override the default value of ``use_stderr`` to ``True`` " "in those commands. We also chose not to override that value in any Glance " "service (e.g., glance-api, glance-registry) so that duplicate logs are not " "created by those services. Operators that have a use-case that relies on " "logs being reported on standard error may set ``[DEFAULT]/use_stderr = " "True`` in the appropriate service's configuration file upon deployment." msgid "" "A return code of ``0`` means you are currently up to date with the latest " "migration script version and all ``db`` upgrades are complete." msgstr "" "A return code of ``0`` means you are currently up to date with the latest " "migration script version and all ``db`` upgrades are complete." msgid "" "A return code of ``3`` means that an upgrade from your current database " "version is available and your first step is to run ``glance-manage db " "expand``." msgstr "" "A return code of ``3`` means that an upgrade from your current database " "version is available and your first step is to run ``glance-manage db " "expand``." msgid "" "A return code of ``4`` means that the expansion stage is complete, and the " "next step is to run ``glance-manage db migrate``." msgstr "" "A return code of ``4`` means that the expansion stage is complete, and the " "next step is to run ``glance-manage db migrate``." msgid "" "A return code of ``5`` means that the expansion and data migration stages " "are complete, and the next step is to run ``glance-manage db contract``." msgstr "" "A return code of ``5`` means that the expansion and data migration stages " "are complete, and the next step is to run ``glance-manage db contract``." msgid "Accept the Range header in requests to serve partial images." msgstr "Accept the Range header in requests to serve partial images." msgid "Add ``ploop`` to the list of supported disk formats." msgstr "Add ``ploop`` to the list of supported disk formats." msgid "Add ``vhdx`` to list of supported disk format." msgstr "Add ``vhdx`` to list of supported disk format." msgid "" "Add ability to import image into multiple stores during `interoperable image " "import process`_." 
msgstr "" "Add ability to import image into multiple stores during `interoperable image " "import process`_." msgid "" "Add ability to import image into multiple stores during `interoperable image " "import process`_. This feature will only work if multiple stores are enabled " "in the deployment. It introduces 3 new optional body fields to the `import " "API path`:" msgstr "" "Add ability to import image into multiple stores during `interoperable image " "import process`_. This feature will only work if multiple stores are enabled " "in the deployment. It introduces 3 new optional body fields to the `import " "API path`:" msgid "" "Added ``hw:mem_encryption`` boolean in the ``OS::Nova::Flavor`` namespace, " "and ``hw_mem_encryption`` boolean in the ``OS::Glance::Image`` namespace." msgstr "" "Added ``hw:mem_encryption`` boolean in the ``OS::Nova::Flavor`` namespace, " "and ``hw_mem_encryption`` boolean in the ``OS::Glance::Image`` namespace." msgid "" "Added ``hw:virtio_packed_ring`` boolean in the ``OS::Nova::Flavor`` " "namespace, and ``hw_virtio_packed_ring`` boolean in the ``OS::Glance::" "Image`` namespace." msgstr "" "Added ``hw:virtio_packed_ring`` boolean in the ``OS::Nova::Flavor`` " "namespace, and ``hw_virtio_packed_ring`` boolean in the ``OS::Glance::" "Image`` namespace." msgid "" "Added ``hw_pmu`` boolean, and ``hw_cdrom_bus`` and ``hw_firmware_type`` " "enumerations in the ``OS::Compute::LibvirtImage`` namespace." msgstr "" "Added ``hw_pmu`` boolean, and ``hw_cdrom_bus`` and ``hw_firmware_type`` " "enumerations in the ``OS::Compute::LibvirtImage`` namespace." msgid "" "Added ``powervm`` to the ``hypervisor_type`` enumeration in the ``OS:::" "Compute::Hypervisor`` namespace." msgstr "" "Added ``powervm`` to the ``hypervisor_type`` enumeration in the ``OS:::" "Compute::Hypervisor`` namespace." msgid "" "Added ``virtio``, ``gop`` and ``none`` to the ``hw_video_model`` enumeration " "in the ``OS::Compute::LibvirtImage`` namespace." msgstr "" "Added ``virtio``, ``gop`` and ``none`` to the ``hw_video_model`` enumeration " "in the ``OS::Compute::LibvirtImage`` namespace." msgid "" "Added a new command ``glance-manage db check``, the command will allow a " "user to check the status of upgrades in the database." msgstr "" "Added a new command ``glance-manage db check``, the command will allow a " "user to check the status of upgrades in the database." msgid "" "Added a plugin to inject image metadata properties to non-admin images " "created via the interoperable image import process." msgstr "" "Added a plugin to inject image metadata properties to non-admin images " "created via the interoperable image import process." msgid "" "Added a plugin to inject image metadata properties to non-admin images " "created via the interoperable image import process. This plugin implements " "the spec `Inject metadata properties automatically to non-admin images`_. " "See the spec for a discussion of the use case addressed by this plugin." msgstr "" "Added a plugin to inject image metadata properties to non-admin images " "created via the interoperable image import process. This plugin implements " "the spec `Inject metadata properties automatically to non-admin images`_. " "See the spec for a discussion of the use case addressed by this plugin." msgid "" "Added additional metadata for CPU thread pinning policies to 'compute-cpu-" "pinning.json'. Use the ``glance_manage`` tool to upgrade." msgstr "" "Added additional metadata for CPU thread pinning policies to 'compute-cpu-" "pinning.json'. 
Use the ``glance_manage`` tool to upgrade." msgid "" "Added cli_opts and cache_opts to support configgen to pick all groups from " "wsgi.py" msgstr "" "Added cli_opts and cache_opts to support configgen to pick all groups from " "wsgi.py" msgid "Added new APIs for cache related operations." msgstr "Added new APIs for cache related operations." msgid "" "Added new import method ``copy-image`` which will copy existing image into " "multiple stores." msgstr "" "Added new import method ``copy-image`` which will copy existing image into " "multiple stores." msgid "" "Added new import method ``copy-image`` which will copy existing image into " "multiple stores. The new import method will work only if multiple stores are " "enabled in the deployment. To use this feature operator needs to mention " "``copy-image`` import method in ``enabled_import_methods`` configuration " "option. Note that this new internal plugin applies *only* to images imported " "via the `interoperable image import process`_." msgstr "" "Added new import method ``copy-image`` which will copy existing image into " "multiple stores. The new import method will work only if multiple stores are " "enabled in the deployment. To use this feature operator needs to mention " "``copy-image`` import method in ``enabled_import_methods`` configuration " "option. Note that this new internal plugin applies *only* to images imported " "via the `interoperable image import process`_." msgid "" "Added oslopolicy enforcer entrypoint making it possible to utilize " "oslopolicy-policy-generator to get uniform information about the policies. " "NOTE: Glance will require policy.json being present for any meaningful " "output." msgstr "" "Added oslopolicy enforcer entrypoint making it possible to utilise " "oslopolicy-policy-generator to get uniform information about the policies. " "NOTE: Glance will require policy.json being present for any meaningful " "output." msgid "" "Added policy support to allow copying image to multiple stores, even if " "those images are not owned by the current user's project." msgstr "" "Added policy support to allow copying image to multiple stores, even if " "those images are not owned by the current user's project." msgid "Added support for cinder multiple stores." msgstr "Added support for Cinder multiple stores." msgid "Added support for copying existing images in multiple stores" msgstr "Added support for copying existing images in multiple stores" msgid "Added support for importing images in multiple stores" msgstr "Added support for importing images in multiple stores" msgid "Added support for sparse image upload for filesystem and rbd driver" msgstr "Added support for sparse image upload for filesystem and RBD driver" msgid "" "Added support for sparse image upload for filesystem and rbd driver of " "glance_store" msgstr "" "Added support for sparse image upload for filesystem and rbd driver of " "glance_store" msgid "Added support for unified quotas using keystone limits" msgstr "Added support for unified quotas using Keystone limits" msgid "" "Added support to append new metadef tags rather than overwriting the " "existing tags." msgstr "" "Added support to append new metadef tags rather than overwriting the " "existing tags." 
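As a hedged illustration of the expand/migrate/contract workflow and the ``glance-manage db check`` return codes described above (the exact output text may differ between releases), a rolling database upgrade might be driven like this::

    # exit code 3: an upgrade is available; start with the expand phase
    glance-manage db check; echo "exit code: $?"
    glance-manage db expand

    # exit code 4: expansion complete; run the data migrations
    glance-manage db check; echo "exit code: $?"
    glance-manage db migrate

    # exit code 5: expansion and data migration complete; contract the schema
    glance-manage db check; echo "exit code: $?"
    glance-manage db contract

    # exit code 0: fully up to date
    glance-manage db check; echo "exit code: $?"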
msgid "Added support to calculate virtual size of image based on disk format" msgstr "Added support to calculate virtual size of image based on disk format" msgid "Added support to configure multiple cinder stores" msgstr "Added support to configure multiple cinder stores" msgid "Added support to delete image from single store" msgstr "Added support to delete image from single store" msgid "Added support to fetch additional information about RBD store." msgstr "Added support to fetch additional information about RBD store." msgid "Added support to get quota usage information." msgstr "Added support to get quota usage information." msgid "Added support to immediately start caching of an image." msgstr "Added support to immediately start caching an image." msgid "" "Adding locations to a non-active or non-queued image is no longer allowed." msgstr "" "Adding locations to a non-active or non-queued image is no longer allowed." msgid "" "Additional values were added to the enumeration for the `hw_disk_bus`_ " "property in the ``OS::Compute::LibvirtImage`` namespace." msgstr "" "Additional values were added to the enumeration for the `hw_disk_bus`_ " "property in the ``OS::Compute::LibvirtImage`` namespace." msgid "" "Additionally, the default value of this option has been changed in this " "release. See the \"Upgrade Notes\" section of this document for more " "information." msgstr "" "Additionally, the default value of this option has been changed in this " "release. See the \"Upgrade Notes\" section of this document for more " "information." msgid "" "Additionally, you will need to verify that the task-related policies in the " "Glance policy.json file are set correctly. These settings are described " "below." msgstr "" "Additionally, you will need to verify that the task-related policies in the " "Glance policy.json file are set correctly. These settings are described " "below." msgid "" "After upgrading, deployments using the cinder backend should update their " "config to specify a volume type. Existing images on those backends will be " "updated at runtime (lazily, when they are first read) to a location URL that " "includes the store and volume type information." msgstr "" "After upgrading, deployments using the Cinder backend should update their " "config to specify a volume type. Existing images on those backends will be " "updated at runtime (lazily, when they are first read) to a location URL that " "includes the store and volume type information." msgid "" "All ``qemu-img info`` calls are now run under resource limitations that " "limit the CPU time and address space usage of the process running the " "command to 2 seconds and 1 GB respectively. This addresses the bug https://" "bugs.launchpad.net/glance/+bug/1449062 Current usage of \"qemu-img\" is " "limited to Glance tasks, which by default (since the Mitaka release) are " "only available to admin users. We continue to recommend that tasks only be " "exposed to trusted users" msgstr "" "All ``qemu-img info`` calls are now run under resource limitations that " "limit the CPU time and address space usage of the process running the " "command to 2 seconds and 1 GB respectively. This addresses the bug https://" "bugs.launchpad.net/glance/+bug/1449062 Current usage of \"qemu-img\" is " "limited to Glance tasks, which by default (since the Mitaka release) are " "only available to admin users. 
We continue to recommend that tasks only be " "exposed to trusted users" msgid "" "All ``qemu-img info`` calls will be run under resource limitations that " "limit the CPU time and address space usage of the process if oslo." "concurrency is at least version 2.6.1. ``qemu-img info`` calls are now " "limited to 2 seconds and 1 GB respectively. This addresses the bug https://" "bugs.launchpad.net/glance/+bug/1449062 Current usage of \"qemu-img\" is " "limited to Glance tasks. In the Mitaka release, tasks by default will only " "be available to admin users. In general, we recommend that tasks only be " "exposed to trusted users, even in releases prior to Mitaka." msgstr "" "All ``qemu-img info`` calls will be run under resource limitations that " "limit the CPU time and address space usage of the process if oslo." "concurrency is at least version 2.6.1. ``qemu-img info`` calls are now " "limited to 2 seconds and 1 GB respectively. This addresses the bug https://" "bugs.launchpad.net/glance/+bug/1449062 Current usage of \"qemu-img\" is " "limited to Glance tasks. In the Mitaka release, tasks by default will only " "be available to admin users. In general, we recommend that tasks only be " "exposed to trusted users, even in releases prior to Mitaka." msgid "" "All images currently with 'public' visibility (that is, images for which " "'is_public' is True in the database) will have their visibility set to " "'public'." msgstr "" "All images currently with 'public' visibility (that is, images for which " "'is_public' is True in the database) will have their visibility set to " "'public'." msgid "" "All the ``glance manage db`` commands are changed appropriately to use " "Alembic to perform operations such as ``version``, ``upgrade``, ``sync`` and " "``version_control``. Hence, the \"old-style\" migration scripts will no " "longer work with the Ocata glance manage db commands." msgstr "" "All the ``glance manage db`` commands are changed appropriately to use " "Alembic to perform operations such as ``version``, ``upgrade``, ``sync`` and " "``version_control``. Hence, the \"old-style\" migration scripts will no " "longer work with the Ocata glance manage db commands." msgid "" "Also, the project reader role is ready to use. Users with reader role can " "only perform the read-only operations within their project. This role can be " "used for the audit purposes." msgstr "" "Also, the project reader role is ready to use. Users with reader role can " "only perform the read-only operations within their project. This role can be " "used for audit purposes." msgid "" "Although support has been added for Glance to be run as a WSGI application " "hosted by a web server, the atypical nature of the Images APIs provided by " "Glance, which enable transfer of copious amounts of image data, makes it " "difficult for this approach to work without careful configuration. Glance " "relies on the use of chunked transfer encoding for image uploads, and the " "support of chunked transfer encoding is not required by the `WSGI " "specification`_." msgstr "" "Although support has been added for Glance to be run as a WSGI application " "hosted by a web server, the atypical nature of the Images APIs provided by " "Glance, which enable transfer of copious amounts of image data, makes it " "difficult for this approach to work without careful configuration. 
Glance " "relies on the use of chunked transfer encoding for image uploads, and the " "support of chunked transfer encoding is not required by the `WSGI " "specification`_." msgid "" "An **EXPERIMENTAL** version of the Images API supplied by Glance is " "introduced as **2.6**. It includes the new API calls introduced for the " "`refactored image import`_ functionality. This functionality is **not** " "enabled by default, so the CURRENT version of the Images API remains at " "2.5. There are no changes to the version 2.5 API in this release, so all " "version 2.5 calls will work whether or not the new import functionality is " "enabled or not." msgstr "" "An **EXPERIMENTAL** version of the Images API supplied by Glance is " "introduced as **2.6**. It includes the new API calls introduced for the " "`refactored image import`_ functionality. This functionality is **not** " "enabled by default, so the CURRENT version of the Images API remains at " "2.5. There are no changes to the version 2.5 API in this release, so all " "version 2.5 calls will work whether or not the new import functionality is " "enabled or not." msgid "" "An enumeration of values was added for the `vmware:hw_version`_ property in " "the ``OS::Compute::VMwareFlavor`` namespace." msgstr "" "An enumeration of values was added for the `vmware:hw_version`_ property in " "the ``OS::Compute::VMwareFlavor`` namespace." msgid "" "An image created by the Block Storage service will have these properties set " "automatically, with the deletion policy set to ``on_image_deletion``." msgstr "" "An image created by the Block Storage service will have these properties set " "automatically, with the deletion policy set to ``on_image_deletion``." msgid "" "An image must have 'shared' visibility in order to accept members. This " "provides a safeguard from 'private' images being shared inadvertently." msgstr "" "An image must have 'shared' visibility in order to accept members. This " "provides a safeguard from 'private' images being shared inadvertently." msgid "" "An image that has 'community' visibility in the v2 API will have " "``is_public`` == False in the v1 API. It will behave like a private image, " "that is, only the owner (or an admin) will have access to the image, and " "only the owner (or an admin) will see the image in the image-list response." msgstr "" "An image that has 'community' visibility in the v2 API will have " "``is_public`` == False in the v1 API. It will behave like a private image, " "that is, only the owner (or an admin) will have access to the image, and " "only the owner (or an admin) will see the image in the image-list response." msgid "" "An image with 'community' visibility is available for consumption by any " "user." msgstr "" "An image with 'community' visibility is available for consumption by any " "user." msgid "" "As Glance relies on oslo.middleware for this feature, care needs to be taken " "that it is configured properly from this release forward." msgstr "" "As Glance relies on oslo.middleware for this feature, care needs to be taken " "that it is configured properly from this release forward." msgid "" "As Image Import will be always enabled, care needs to be taken that it is " "configured properly from this release forward. The 'enable_image_import' " "option is silently ignored." msgstr "" "As Image Import will be always enabled, care needs to be taken that it is " "configured properly from this release forward. The 'enable_image_import' " "option is silently ignored." 
msgid "" "As far as the Glance team can determine, the difficulties running Glance as " "a WSGI application are caused by issues external to Glance. Thus the Glance " "team recommends that Glance be run in its normal standalone configuration, " "particularly in production environments. If you choose to run Glance as a " "WSGI application in a web server, be sure to test your installation " "carefully with realistic usage scenarios." msgstr "" "As far as the Glance team can determine, the difficulties running Glance as " "a WSGI application are caused by issues external to Glance. Thus the Glance " "team recommends that Glance be run in its normal standalone configuration, " "particularly in production environments. If you choose to run Glance as a " "WSGI application in a web server, be sure to test your installation " "carefully with realistic usage scenarios." msgid "" "As is standard behavior for the image-list call, other filters may be " "applied to the request. For example, to see the community images supplied " "by user ``931efe8a-0ad7-4610-9116-c199f8807cda``, the following call would " "be made: ``GET v2/images?visibility=community&owner=931efe8a-0ad7-4610-9116-" "c199f8807cda``" msgstr "" "As is standard behaviour for the image-list call, other filters may be " "applied to the request. For example, to see the community images supplied " "by user ``931efe8a-0ad7-4610-9116-c199f8807cda``, the following call would " "be made: ``GET v2/images?visibility=community&owner=931efe8a-0ad7-4610-9116-" "c199f8807cda``" msgid "" "As mentioned above, the default visibility of an image is 'shared'. If a " "user wants an image to be private and not accept any members, a visibility " "of 'private' can be explicitly assigned at the time of creation." msgstr "" "As mentioned above, the default visibility of an image is 'shared'. If a " "user wants an image to be private and not accept any members, a visibility " "of 'private' can be explicitly assigned at the time of creation." msgid "" "As mentioned above, the same recommendation applies to the policy-based " "configuration for exposing multiple image locations." msgstr "" "As mentioned above, the same recommendation applies to the policy-based " "configuration for exposing multiple image locations." msgid "" "As of this release, the only service using the ``compressed`` container " "format is Cinder (Block Storage Service), when Cinder is configured to use " "compression when uploading a volume-image to Glance. While you may expect " "that Cinder will be able to consume any image in ``compressed`` container " "format *that Cinder has created*, you should not expect Cinder to be able to " "successfully use an image in ``compressed`` format that it has not created " "itself. Consult the `Cinder documentation `_ for more information." msgstr "" "As of this release, the only service using the ``compressed`` container " "format is Cinder (Block Storage Service), when Cinder is configured to use " "compression when uploading a volume-image to Glance. While you may expect " "that Cinder will be able to consume any image in ``compressed`` container " "format *that Cinder has created*, you should not expect Cinder to be able to " "successfully use an image in ``compressed`` format that it has not created " "itself. Consult the `Cinder documentation `_ for more information." msgid "" "As part of the multi-store efforts this release introduces deletion from " "single store. 
Through new '/v2/stores' endpoint the API user can request " "image to be deleted from single store instead of deleting the whole image. " "This feature can be used to clean up store metadata in cases where the image " "data has for some reason disappeared from the store already, except 410 Gone " "HTTP response." msgstr "" "As part of the multi-store efforts this release introduces deletion from " "single store. Through new '/v2/stores' endpoint the API user can request " "image to be deleted from single store instead of deleting the whole image. " "This feature can be used to clean up store metadata in cases where the image " "data has for some reason disappeared from the store already, except 410 Gone " "HTTP response." msgid "" "As per the revised SRBAC community goals, glance service is switching to new " "defaults by default in Antelope cycle, hence removing the deprecated " "``enforce_secure_rbac`` option which is no longer needed. The " "``enforce_secure_rbac`` option was introduced EXPERIMENTAL in Wallaby " "release for operators to opt into enforcing authorization based on common " "RBAC personas." msgstr "" "As per the revised SRBAC community goals, the Glance service is switching to " "new defaults by default in the Antelope cycle, hence removing the deprecated " "``enforce_secure_rbac`` option which is no longer needed. The " "``enforce_secure_rbac`` option was introduced EXPERIMENTAL in the Wallaby " "release for operators to opt into enforcing authorisation based on common " "RBAC personas." msgid "" "As with all container formats, Glance does not verify that the data payload " "of an image is actually in that format. Further, you should not expect " "other OpenStack services to be able to handle arbitrary compressed file " "formats. Consult the documentation of any services that will consume your " "image for details." msgstr "" "As with all container formats, Glance does not verify that the data payload " "of an image is actually in that format. Further, you should not expect " "other OpenStack services to be able to handle arbitrary compressed file " "formats. Consult the documentation of any services that will consume your " "image for details." msgid "" "Attempting to set image locations to an image *not* in ``active`` or " "``queued`` status will now result in a HTTP Conflict (HTTP status code 409) " "to the user." msgstr "" "Attempting to set image locations to an image *not* in ``active`` or " "``queued`` status will now result in a HTTP Conflict (HTTP status code 409) " "to the user." msgid "" "Automatic image conversion plugin for Interoperable Image Import. With this " "release operators can specify target image format and get all images created " "via the Image Import methods introduced in the Images API v2.6 converted " "automatically to that format. The feautre uses qemu-img under the hood which " "limits the source image formats that users can upload. Any image that fails " "the conversion when this plugin is enabled will fail the image creation." msgstr "" "Automatic image conversion plugin for Interoperable Image Import. With this " "release operators can specify target image format and get all images created " "via the Image Import methods introduced in the Images API v2.6 converted " "automatically to that format. The feature uses qemu-img which limits the " "source image formats that users can upload. Any image that fails the " "conversion when this plugin is enabled will fail the image creation." 
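A minimal sketch of enabling the automatic image conversion plugin described above in ``glance-api.conf`` (option names as found in the in-tree plugin; verify them against your release before relying on this)::

    [image_import_opts]
    # enable the conversion plugin for interoperable image import
    image_import_plugins = ['image_conversion']

    [image_conversion]
    # target format for all imported images
    output_format = raw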
msgid "" "Be aware that if you define a policy rule for ``default`` or " "``context_is_admin``, that policy rule will also be used by the policies " "that govern permissions to perform actions using the Images API, even if " "these actions are not specified in the policy file." msgstr "" "Be aware that if you define a policy rule for ``default`` or " "``context_is_admin``, that policy rule will also be used by the policies " "that govern permissions to perform actions using the Images API, even if " "these actions are not specified in the policy file." msgid "Bug 1229823_: Handle file delete races in image cache" msgstr "Bug 1229823_: Handle file delete races in image cache" msgid "Bug 1482129_: Remove duplicate key from dictionary" msgstr "Bug 1482129_: Remove duplicate key from dictionary" msgid "Bug 1483353 v1 Updates using x-image-meta-id header provoke E500 or 200" msgstr "" "Bug 1483353 v1 Updates using x-image-meta-id header provoke E500 or 200" msgid "Bug 1504184 Glance does not error gracefully on token validation error" msgstr "Bug 1504184 Glance does not error gracefully on token validation error" msgid "" "Bug 1505474 Glance raise 500 error when delete images with unallowed status " "change" msgstr "" "Bug 1505474 Glance raise 500 error when delete images with unallowed status " "change" msgid "" "Bug 1505675 Flaky tasks test glance.tests.unit.v2.test_tasks_resource." "TestTasksController.test_create_with_live_time" msgstr "" "Bug 1505675 Flaky tasks test glance.tests.unit.v2.test_tasks_resource." "TestTasksController.test_create_with_live_time" msgid "Bug 1505710 Wrong logging setup in replicator" msgstr "Bug 1505710 Wrong logging setup in replicator" msgid "" "Bug 1512369 glance should declare a test-requirements.txt on swiftclient " "(for config generator)" msgstr "" "Bug 1512369 glance should declare a test-requirements.txt on swiftclient " "(for config generator)" msgid "Bug 1516706_: Prevent v1_api from making requests to v2_registry" msgstr "Bug 1516706_: Prevent v1_api from making requests to v2_registry" msgid "" "Bug 1517060 Users (without admin privileges) can change ACTIVE_IMMUTABLE " "properties of their own images when deactivated." msgstr "" "Bug 1517060 Users (without admin privileges) can change ACTIVE_IMMUTABLE " "properties of their own images when deactivated." 
msgid "" "Bug 1522132 Scrubber tests are broken due to deprecated config " "filesystem_store_datadir under DEFAULT section" msgstr "" "Bug 1522132 Scrubber tests are broken due to deprecated config " "filesystem_store_datadir under DEFAULT section" msgid "Bug 1554412_: Provide user friendly message for FK failure" msgstr "Bug 1554412_: Provide user friendly message for FK failure" msgid "Bug 1636243_: Add CPU Mode Metadata Def" msgstr "Bug 1636243_: Add CPU Mode Metadata Def" msgid "Bug 1647491_: Missing documentation for glance-manage db_purge command" msgstr "Bug 1647491_: Missing documentation for glance-manage db_purge command" msgid "Bug 1655727_: Invoke monkey_patching early enough for eventlet 0.20.1" msgstr "Bug 1655727_: Invoke monkey_patching early enough for eventlet 0.20.1" msgid "Bug 1657459_: Fix incompatibilities with WebOb 1.7" msgstr "Bug 1657459_: Fix incompatibilities with WebOb 1.7" msgid "Bug 1664709_: Do not serve partial image download requests from cache" msgstr "Bug 1664709_: Do not serve partial image download requests from cache" msgid "Bug 1686488_: Fix glance image-download error" msgstr "Bug 1686488_: Fix glance image-download error" msgid "Bug 1688189_: Fix member create to handle unicode characters" msgstr "Bug 1688189_: Fix member create to handle unicode characters" msgid "Bug 1695299_: Support RFC1738 quoted chars in passwords" msgstr "Bug 1695299_: Support RFC1738 quoted chars in passwords" msgid "Bug 1701346_: Fix trust auth mechanism" msgstr "Bug 1701346_: Fix trust auth mechanism" msgid "Bug 1714240_: Avoid restarting a child when terminating" msgstr "Bug 1714240_: Avoid restarting a child when terminating" msgid "Bug 1719252_: Metadefs: Fix 500 for name with more than 80 chars" msgstr "Bug 1719252_: Metadefs: Fix 500 for name with more than 80 chars" msgid "Bug 1720354_: Correctly send auth request to oslo.policy" msgstr "Bug 1720354_: Correctly send auth request to oslo.policy" msgid "Bug 1733813_: Fix 500 from image-import on queued images" msgstr "Bug 1733813_: Fix 500 from image-import on queued images" msgid "" "Bug 1734832_: Fix unreachable 'ImageSizeLimitExceeded' exception in image-" "upload" msgstr "" "Bug 1734832_: Fix unreachable 'ImageSizeLimitExceeded' exception in image-" "upload" msgid "Bug 1737952_: Fix 500 if custom property name is greater than 255" msgstr "Bug 1737952_: Fix 500 if custom property name is greater than 255" msgid "Bug 1744824_: Fix py27 eventlet issue <0.22.0" msgstr "Bug 1744824_: Fix py27 eventlet issue <0.22.0" msgid "Bug 1748916_: Glance default workers total overkill for modern servers" msgstr "Bug 1748916_: Glance default workers total overkill for modern servers" msgid "Bug 1749297_: Fix 500 from list-tasks call with postgresql" msgstr "Bug 1749297_: Fix 500 from list-tasks call with PostgreSQL" msgid "Bug 1750892_: Update status to active when locations replaced" msgstr "Bug 1750892_: Update status to active when locations replaced" msgid "Bug 1753964_: web-download fails with default node_staging_uri" msgstr "Bug 1753964_: web-download fails with default node_staging_uri" msgid "Bug 1754634_: Image Import call does not honour enabled methods" msgstr "Bug 1754634_: Image Import call does not honour enabled methods" msgid "Bug 1759510_: Image Import fails with Python 3.5" msgstr "Bug 1759510_: Image Import fails with Python 3.5" msgid "Bug 1765748_: Prepare for WebOb 1.8.1" msgstr "Bug 1765748_: Prepare for WebOb 1.8.1" msgid "Bug 1770410_: Use WebOb 1.8.1" msgstr "Bug 1770410_: Use WebOb 1.8.1" msgid "Bug 
1779781_: virt/vmware not support VirtualSriovEthernetCard" msgstr "Bug 1779781_: virt/vmware not support VirtualSriovEthernetCard" msgid "Bug 1781617_: Rename ``async`` package to ``async_`` (Python 3.7)" msgstr "Bug 1781617_: Rename ``async`` package to ``async_`` (Python 3.7)" msgid "Bug 1781627_: Handle StopIteration for Py3.7 PEP 0479" msgstr "Bug 1781627_: Handle StopIteration for Py3.7 PEP 0479" msgid "Bug 1793057_: Provision to add new config options in sample config file" msgstr "" "Bug 1793057_: Provision to add new config options in sample config file" msgid "" "Bug 1795950_: Fix cleaning of web-download image import in node_staging_uri" msgstr "" "Bug 1795950_: Fix cleaning of web-download image import in node_staging_uri" msgid "Bug 1800601_: py3: fix recursion issue under py37" msgstr "Bug 1800601_: py3: fix recursion issue under py37" msgid "Bug 1802587_: Make location API compatible with multiple store" msgstr "Bug 1802587_: Make location API compatible with multiple store" msgid "Bug 1803299_: Failure in web-dowload kept image in importing state" msgstr "Bug 1803299_: Failure in web-download kept image in importing state" msgid "" "Bug 1803498_: Data remains in staging area if 'file' store is not enabled" msgstr "" "Bug 1803498_: Data remains in staging area if 'file' store is not enabled" msgid "Bug 1803643_: Fix for FK constraint violation" msgstr "Bug 1803643_: Fix for FK constraint violation" msgid "Bug 1805765_: Image conversion fails" msgstr "Bug 1805765_: Image conversion fails" msgid "Bug 1808063_: Guard __getattr__ on QuotaImageTagsProxy" msgstr "Bug 1808063_: Guard __getattr__ on QuotaImageTagsProxy" msgid "Bug 1808814_: admin docs: interoperable image import revision for stein" msgstr "" "Bug 1808814_: admin docs: interoperable image import revision for Stein" msgid "Bug 1808868_: Add SEV-related extra spec and image properties" msgstr "Bug 1808868_: Add SEV-related extra spec and image properties" msgid "Bug 1809462_: Correct typo in config option choices (Image conversion)" msgstr "Bug 1809462_: Correct typo in config option choices (Image conversion)" msgid "Bug 1818919_: py3: Fix return type on CooperativeReader.read" msgstr "Bug 1818919_: py3: Fix return type on CooperativeReader.read" msgid "" "Bug 1823703_: Wrong version URL when Glance is deployed behind proxy with " "vhost" msgstr "" "Bug 1823703_: Wrong version URL when Glance is deployed behind proxy with " "vhost" msgid "Bug 1836140_: Image deletion returns 500 if 'file' store is not enabled" msgstr "" "Bug 1836140_: Image deletion returns 500 if 'file' store is not enabled" msgid "" "Bug 1843576_: Glance metadefs is missing Image property " "hw_vif_multiqueue_enabled" msgstr "" "Bug 1843576_: Glance metadefs is missing Image property " "hw_vif_multiqueue_enabled" msgid "" "Bug 1850412_: Useful image properties in glance - os_admin_user not " "documented" msgstr "" "Bug 1850412_: Useful image properties in glance - os_admin_user not " "documented" msgid "Bug 1855708_: Reload broken in PY3" msgstr "Bug 1855708_: Reload broken in PY3" msgid "Bug 1855708_: Reload tests broken in Py3" msgstr "Bug 1855708_: Reload tests broken in Py3" msgid "Bug 1856578_: docs: image schema customization restrictions" msgstr "Bug 1856578_: docs: image schema customisation restrictions" msgid "Bug 1856581_: metadefs: OS::Glance::CommonImageProperties out of date" msgstr "Bug 1856581_: metadefs: OS::Glance::CommonImageProperties out of date" msgid "" "Bug 1861334_: cors config defaults not used when Glance is run as WSGI
app" msgstr "" "Bug 1861334_: cors config defaults not used when Glance is run as WSGI app" msgid "" "Bug 1861501_: Store ID fetched from URI is incorrectly encoded under py27" msgstr "" "Bug 1861501_: Store ID fetched from URI is incorrectly encoded under py27" msgid "" "Bug 1861723_: Glance is listening on TCP socket before store initialization" msgstr "" "Bug 1861723_: Glance is listening on TCP socket before store initialisation" msgid "" "Bug 1863021_: eventlet monkey patch results in assert len(_active) == 1 " "AssertionError" msgstr "" "Bug 1863021_: eventlet monkey patch results in assert len(_active) == 1 " "AssertionError" msgid "Bug 1863879_: Multiple import fails if all-stores 'True' is passed" msgstr "Bug 1863879_: Multiple import fails if all-stores 'True' is passed" msgid "Bug 1870336_: Update 'common image properties' doc" msgstr "Bug 1870336_: Update 'common image properties' doc" msgid "Bug 1875629_: api-ref needs update about checksum image property" msgstr "Bug 1875629_: api-ref needs update about checksum image property" msgid "" "Bug 1876419_: Failed to parse json file /etc/glance/metadefs/compute-vmware." "json" msgstr "" "Bug 1876419_: Failed to parse JSON file /etc/glance/metadefs/compute-vmware." "json" msgid "" "Bug 1881958_: read-only http store should not be used if --all-stores " "specified for import/copy image workflow" msgstr "" "Bug 1881958_: read-only http store should not be used if --all-stores " "specified for import/copy image workflow" msgid "" "Bug 1884587_: image import copy-image API should reflect proper authorization" msgstr "" "Bug 1884587_: image import copy-image API should reflect proper authorisation" msgid "" "Bug 1884596_: A change was added to the import API which provides time-based " "locking of an image to exclude other import operations from starting until " "the lock-holding task completes. The lock is based on the task that we start " "to do the work, and the UUID of that task is stored in the " "``os_glance_import_task`` image property, which indicates who owns the lock. " "If the task holding the lock fails to make progress for 60 minutes, another " "import operation will be allowed to steal the lock and start another import " "operation." msgstr "" "Bug 1884596_: A change was added to the import API which provides time-based " "locking of an image to exclude other import operations from starting until " "the lock-holding task completes. The lock is based on the task that we start " "to do the work, and the UUID of that task is stored in the " "``os_glance_import_task`` image property, which indicates who owns the lock. " "If the task holding the lock fails to make progress for 60 minutes, another " "import operation will be allowed to steal the lock and start another import " "operation." 
msgid "Bug 1885003_: Interrupted copy-image may break a subsequent operation" msgstr "Bug 1885003_: Interrupted copy-image may break a subsequent operation" msgid "Bug 1885725_: 'copy-image' import job should not run additional plugins" msgstr "" "Bug 1885725_: 'copy-image' import job should not run additional plugins" msgid "Bug 1885928_: Unable to spawn VM from community image" msgstr "Bug 1885928_: Unable to spawn VM from community image" msgid "Bug 1886374_: Improve lazy loading mechanism for multiple stores" msgstr "Bug 1886374_: Improve lazy loading mechanism for multiple stores" msgid "Bug 1887099_: Invalid metadefs for watchdog" msgstr "Bug 1887099_: Invalid metadefs for watchdog" msgid "Bug 1887994_: Mixed message in admin docs to deploy under httpd" msgstr "Bug 1887994_: Mixed message in admin docs to deploy under httpd" msgid "Bug 1888349_: glance-cache-manage utility is broken" msgstr "Bug 1888349_: glance-cache-manage utility is broken" msgid "Bug 1888713_: Async tasks, image import not supported in pure-WSGI mode" msgstr "" "Bug 1888713_: Async tasks, image import not supported in pure-WSGI mode" msgid "Bug 1889640_: Image import might result 'active' image with no data." msgstr "Bug 1889640_: Image import might result 'active' image with no data." msgid "Bug 1889664_: Image Import 'web-download' is broken with py37+" msgstr "Bug 1889664_: Image Import 'web-download' is broken with py37+" msgid "" "Bug 1889676_: \"stores\" can be set as property breaking multistore " "indication of stores where the images are present" msgstr "" "Bug 1889676_: \"stores\" can be set as property breaking multistore " "indication of stores where the images are present" msgid "" "Bug 1891190_: test_reload() functional test causes hang and jobs TIMED_OUT" msgstr "" "Bug 1891190_: test_reload() functional test causes hang and jobs TIMED_OUT" msgid "" "Bug 1891352_: Failed import of one store will remain in progress forever if " "all_stores_must_succeed=True" msgstr "" "Bug 1891352_: Failed import of one store will remain in progress forever if " "all_stores_must_succeed=True" msgid "" "Bug 1895173_: Caught error: UPDATE statement on table 'image_properties'. " "expected to update 1 row(s); 0 were matched" msgstr "" "Bug 1895173_: Caught error: UPDATE statement on table 'image_properties'. 
" "expected to update 1 row(s); 0 were matched" msgid "" "Bug 1895663_: Image import \"web-download\" doesn't check on download size" msgstr "" "Bug 1895663_: Image import \"web-download\" doesn't check on download size" msgid "" "Bug 1905672_: Non existing property protection file raises 500 Internal " "server error" msgstr "" "Bug 1905672_: Non-existing property protection file raises 500 Internal " "server error" msgid "Bug 1913625_: Glance will leak staging data" msgstr "Bug 1913625_: Glance will leak staging data" msgid "Bug 1914826_: web download with invalid url does not report error" msgstr "Bug 1914826_: web download with invalid URL does not report error" msgid "Bug 1914826_: web-download with invalid url does not report error" msgstr "Bug 1914826_: web-download with invalid URL does not report error" msgid "" "Bug 1916011_: test_migrate_image_after_upgrade failing because of glance " "cinder store change" msgstr "" "Bug 1916011_: test_migrate_image_after_upgrade failing because of glance " "cinder store change" msgid "Bug 1916052_: Unable to Create trust errors in glance-api" msgstr "Bug 1916052_: Unable to Create trust errors in glance-api" msgid "Bug 1916052_: Unable to create trust errors in glance-api" msgstr "Bug 1916052_: Unable to create trust errors in glance-api" msgid "Bug 1922928_: Image Tasks API excludes in-progress tasks" msgstr "Bug 1922928_: Image Tasks API excludes in-progress tasks" msgid "Bug 1922928_: Image tasks API excludes in-progress tasks" msgstr "Bug 1922928_: Image tasks API excludes in-progress tasks" msgid "Bug 1930597_: Doc for \"Configuring SSL Support\" outdated in glance" msgstr "Bug 1930597_: Doc for \"Configuring SSL Support\" outdated in glance" msgid "" "Bug 1934673_: Policy deprecations falsely claims defaulting to role based " "policies" msgstr "" "Bug 1934673_: Policy deprecations falsely claims defaulting to role based " "policies" msgid "" "Bug 1934673_: Policy deprecations falsely claims defaulting to role based " "policies." msgstr "" "Bug 1934673_: Policy deprecations falsely claims defaulting to role based " "policies." msgid "Bug 1936665_: Functional tests not available for metadef resource types" msgstr "" "Bug 1936665_: Functional tests not available for metadef resource types" msgid "" "Bug 1937901_: healthcheck middleware should be deployed as app instead of " "filter" msgstr "" "Bug 1937901_: healthcheck middleware should be deployed as app instead of " "filter" msgid "Bug 1939169_: glance md-tag-create-multiple overwrites existing tags" msgstr "Bug 1939169_: glance md-tag-create-multiple overwrites existing tags" msgid "Bug 1939307_: glance-uwsgi - Add missing cache prefetching periodic job" msgstr "" "Bug 1939307_: glance-uwsgi - Add missing cache prefetching periodic job" msgid "" "Bug 1939690_: The api-ref response and the actual response returned from the " "Create Tags API does not match" msgstr "" "Bug 1939690_: The api-ref response and the actual response returned from the " "Create Tags API does not match" msgid "" "Bug 1939922_: Internal server error if shared member tries to stage data to " "image" msgstr "" "Bug 1939922_: Internal server error if shared member tries to stage data to " "image" msgid "" "Bug 1939944_: The parameters of the healthcheck middlewares are missing from " "glance-api.conf" msgstr "" "Bug 1939944_: The parameters of the healthcheck middlewares are missing from " "glance-api.conf" msgid "" "Bug 1940090_: options of the castellan library are missing from glance-api." 
"conf" msgstr "" "Bug 1940090_: options of the castellan library are missing from glance-api." "conf" msgid "" "Bug 1940733_: [oslo_reports] options are missing from the config file " "generated by oslo-confi-generator" msgstr "" "Bug 1940733_: [oslo_reports] options are missing from the config file " "generated by oslo-confi-generator" msgid "Bug 1946100_: [oslo_limit] parameters are missing from glance-api.conf" msgstr "Bug 1946100_: [oslo_limit] parameters are missing from glance-api.conf" msgid "Bug 1953063_: Image import causes SQL type casting error on PostgreSQL" msgstr "Bug 1953063_: Image import causes SQL type casting error on PostgreSQL" msgid "Bug 1954321_: Python3.10 error" msgstr "Bug 1954321_: Python3.10 error" msgid "Bug 1962480_: api-ref: versions response needs an update" msgstr "Bug 1962480_: api-ref: versions response needs an update" msgid "Bug 1962581_: bad default value for [wsgi] /python_interpreter option" msgstr "Bug 1962581_: bad default value for [wsgi] /python_interpreter option" msgid "Bug 1971176_: api-ref: cache manage needs improvements" msgstr "Bug 1971176_: api-ref: cache manage needs improvements" msgid "" "Bug 1972666_: Added cli_opts and cache_opts to support configgen to pick all " "groups from wsgi.py" msgstr "" "Bug 1972666_: Added cli_opts and cache_opts to support configgen to pick all " "groups from wsgi.py" msgid "Bug 1973136_: glance-multistore-cinder-import is failing consistently" msgstr "Bug 1973136_: glance-multistore-cinder-import is failing consistently" msgid "" "Bug 1973631_: List call for metadef namespaces returns 404 not found while " "fetching resource_types" msgstr "" "Bug 1973631_: List call for metadef namespaces returns 404 not found while " "fetching resource_types" msgid "Bug 1982426_: Python3.11: \"glance-manage\" crashes" msgstr "Bug 1982426_: Python3.11: \"glance-manage\" crashes" msgid "Bug 1983279_: Cannot upload vmdk images due to unsupported vmdk format" msgstr "Bug 1983279_: Cannot upload VMDK images due to unsupported VMDK format" msgid "Bug 1989268_: Wrong assertion method" msgstr "Bug 1989268_: Wrong assertion method" msgid "Bug 1990854_: oslo_limit section not clear" msgstr "Bug 1990854_: oslo_limit section not clear" msgid "" "Bug 1996188_: [OSSA-2023-002] Arbitrary file access through custom VMDK flat " "descriptor (CVE-2022-47951)" msgstr "" "Bug 1996188_: [OSSA-2023-002] Arbitrary file access through custom VMDK flat " "descriptor (CVE-2022-47951)" msgid "Bug 2007354_: duplicate values in compute-host-capabilities.json" msgstr "Bug 2007354_: duplicate values in compute-host-capabilities.json" msgid "" "Bug 2028895_: Interoperable Image Import in glance documented format for " "inject not working as expected" msgstr "" "Bug 2028895_: Interoperable Image Import in Glance documented format for " "inject not working as expected" msgid "Bug 2049064_: Unit/functional test failures with oslo.limit 2.3.0" msgstr "Bug 2049064_: Unit/functional test failures with oslo.limit 2.3.0" msgid "Bug 2059829_: Install and configure (Ubuntu) in glance" msgstr "Bug 2059829_: Install and configure (Ubuntu) in glance" msgid "" "Bug 2061947_: stores-info --detail command fails if swift store is enabled" msgstr "" "Bug 2061947_: stores-info --detail command fails if Swift store is enabled" msgid "" "Bug 2065087_: glance-cache-prefetcher is not working as threadpool is not set" msgstr "" "Bug 2065087_: glance-cache-prefetcher is not working as threadpool is not set" msgid "Bug 2072483_: Revert image status to queued if image conversion 
fails" msgstr "Bug 2072483_: Revert image status to queued if image conversion fails" msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Bug `1971521 `_: Fixed the " "success response code of the REST API call ``PUT /v2/cache/{image_id}`` to " "be 202 (Accepted), following the original design of the feature." msgstr "" "Bug `1971521 `_: Fixed the " "success response code of the REST API call ``PUT /v2/cache/{image_id}`` to " "be 202 (Accepted), following the original design of the feature." msgid "" "Bug `1980049 `_: Fixed the " "success response code of the REST API call ``DELETE /v2/cache/{image_id}`` " "and ``DELETE /v2/cache`` to be 204 (No Content), following the original " "design of the feature." msgstr "" "Bug `1980049 `_: Fixed the " "success response code of the REST API call ``DELETE /v2/cache/{image_id}`` " "and ``DELETE /v2/cache`` to be 204 (No Content), following the original " "design of the feature." msgid "" "Changes in Python 2.7 distribution packages affected Glance's use of " "eventlet. As a result, the team backported a fix from eventlet 0.22.0 to " "the Glance code. (The Ocata release of OpenStack uses eventlet 0.19.0.) " "See Bug 1747305_ for details." msgstr "" "Changes in Python 2.7 distribution packages affected Glance's use of " "eventlet. As a result, the team backported a fix from eventlet 0.22.0 to " "the Glance code. (The Ocata release of OpenStack uses eventlet 0.19.0.) " "See Bug 1747305_ for details." msgid "" "Changes in Python 2.7 distribution packages affected Glance's use of " "eventlet. As a result, the team backported a fix from eventlet 0.22.0 to " "the Glance code. (The Pike release of OpenStack uses eventlet 0.20.0.) See " "Bug 1747304_ for details." msgstr "" "Changes in Python 2.7 distribution packages affected Glance's use of " "eventlet. As a result, the team backported a fix from eventlet 0.22.0 to " "the Glance code. (The Pike release of OpenStack uses eventlet 0.20.0.) See " "Bug 1747304_ for details." msgid "" "Code for the OpenStack Artifacts Service (Glare) and its EXPERIMENTAL API " "has been `removed`_ from the Glance codebase." msgstr "" "Code for the OpenStack Artifacts Service (Glare) and its EXPERIMENTAL API " "has been `removed`_ from the Glance codebase." msgid "" "Code for the OpenStack Artifacts Service (`Glare`_) and its EXPERIMENTAL API " "has been removed from the Glance codebase, as it was relocated into an " "independent `Glare`_ project repository during a previous release cycle. The " "database upgrade for the Glance Pike release drops the Glare tables (named " "'artifacts' and 'artifact_*') from the Glance database." msgstr "" "Code for the OpenStack Artifacts Service (`Glare`_) and its EXPERIMENTAL API " "has been removed from the Glance codebase, as it was relocated into an " "independent `Glare`_ project repository during a previous release cycle. The " "database upgrade for the Glance Pike release drops the Glare tables (named " "'artifacts' and 'artifact_*') from the Glance database." msgid "" "Correction of API response code for PUT /v2/cache/{image_id} from HTTP 200 " "to HTTP 202. (`Bug 1971521 `_)" msgstr "" "Correction of API response code for PUT /v2/cache/{image_id} from HTTP 200 " "to HTTP 202. (`Bug 1971521 `_)" msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Currently, we are experiencing some problems in the gate when Glance is " "configured to run in devstack following the guidelines recommended in the " "documentation. 
You can follow `Bug 1703856`_ to learn more." msgstr "" "Currently, we are experiencing some problems in the gate when Glance is " "configured to run in devstack following the guidelines recommended in the " "documentation. You can follow `Bug 1703856`_ to learn more." msgid "Database downgrades have been removed from the Glance source tree." msgstr "Database downgrades have been removed from the Glance source tree." msgid "" "Database versions are no longer numerical. Instead, they are the *revision " "ID* of the last migration applied on the database." msgstr "" "Database versions are no longer numerical. Instead, they are the *revision " "ID* of the last migration applied on the database." msgid "" "Deprecate the ``show_multiple_locations`` configuration option in favor of " "the existing Role Based Access Control (RBAC) for Image locations which uses " "``policy.json`` file to define the appropriate rules." msgstr "" "Deprecate the ``show_multiple_locations`` configuration option in favour of " "the existing Role Based Access Control (RBAC) for Image locations which uses " "``policy.json`` file to define the appropriate rules." msgid "" "Deprecated \"sign-the-hash\" approach for image signing. Old run_tests and " "related scripts have been removed." msgstr "" "Deprecated \"sign-the-hash\" approach for image signing. Old run_tests and " "related scripts have been removed." msgid "" "Deprecated values are no longer recognized for the configuration option " "``store_type_preference``. The two non-standard values 'filesystem' and " "'vmware_datastore' were DEPRECATED in Newton and are no longer operable. The " "correct values for those stores are 'file' and 'vmware'. See the Newton " "release notes for more information at https://docs.openstack.org/" "releasenotes/glance/newton.html#upgrade-notes" msgstr "" "Deprecated values are no longer recognized for the configuration option " "``store_type_preference``. The two non-standard values 'filesystem' and " "'vmware_datastore' were DEPRECATED in Newton and are no longer operable. The " "correct values for those stores are 'file' and 'vmware'. See the Newton " "release notes for more information at https://docs.openstack.org/" "releasenotes/glance/newton.html#upgrade-notes" msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Documentation examples were changed from ``openstack`` commands back to " "``glance``. This should help avoid the frustration of glance-community " "maintaining different client than what is referred in examples. 'python-" "glanceclient' is and will be the reference implementation of Images API and " "the team will implement all API changes to the relevant client version of " "the cycle as well." msgstr "" "Documentation examples were changed from ``openstack`` commands back to " "``glance``. This should help avoid the frustration of Glance-community " "maintaining different client than what is referred in examples. 'python-" "glanceclient' is and will be the reference implementation of Images API and " "the team will implement all API changes to the relevant client version of " "the cycle as well." msgid "Dropped support for python 2.7" msgstr "Dropped support for Python 2.7" msgid "" "Due to some unresolved issues on consuming multiple backends work the " "stabilization of the feature has been pushed to Train and will stay " "EXPERIMENTAL on Stein." 
msgstr "" "Due to some unresolved issues on consuming multiple backends work the " "stabilisation of the feature has been pushed to Train and will stay " "EXPERIMENTAL on Stein." msgid "" "Due to the bug in the glance_store implementation of multihash feature the " "first stable Rocky release (0.26.0) of glance_store does not work with " "Glance 17.0.0. Please note that version 0.26.1+ of the store library is " "required. Image creations will fail when the data is tried to be uploaded to " "the back-end due to missing wrapping of the function used." msgstr "" "Due to the bug in the glance_store implementation of multihash feature the " "first stable Rocky release (0.26.0) of glance_store does not work with " "Glance 17.0.0. Please note that version 0.26.1+ of the store library is " "required. Image creations will fail when the data is tried to be uploaded to " "the back-end due to missing wrapping of the function used." msgid "" "During upgrade from single cinder store to multiple cinder stores, legacy " "images location url will be updated to the new format with respect to the " "volume type configured in the stores. Legacy location url: cinder:// New location url: cinder:///" msgstr "" "During upgrade from single Cinder store to multiple Cinder stores, legacy " "images location URL will be updated to the new format with respect to the " "volume type configured in the stores. Legacy location URL: cinder:// New location URL: cinder:///" msgid "" "During validation of hashing data when do_secure_hash is `false`, we can " "just validate length expected for hash_algo and not actual expected hash " "value. If garbage hash_value with expected size has been provided, image " "becomes active after adding location but it will be of no use as download or " "boot will fail with corrupt image error." msgstr "" "During validation of hashing data when do_secure_hash is `false`, we can " "just validate length expected for hash_algo and not actual expected hash " "value. If garbage hash_value with expected size has been provided, image " "becomes active after adding location but it will be of no use as download or " "boot will fail with corrupt image error." msgid "" "Ensure that the version 0.26.1 or higher of glance_store library is used." msgstr "" "Ensure that the version 0.26.1 or higher of glance_store library is used." msgid "" "Existing authorization and policies will continue to work, but we encourage " "operators to review the new policies and consolidate any redundant overrides " "with the new defaults if possible. Please review the feature section above " "for more details." msgstr "" "Existing authorisation and policies will continue to work, but we encourage " "operators to review the new policies and consolidate any redundant overrides " "with the new defaults if possible. Please review the feature section above " "for more details." msgid "" "Expired tasks are now deleted in Glance. As with other Glance resources, " "this is a \"soft\" deletion, that is, a deleted task is marked as " "``deleted`` in the database so that the task will not appear in API " "responses, but the information associated with the task persists in the " "database." msgstr "" "Expired tasks are now deleted in Glance. As with other Glance resources, " "this is a \"soft\" deletion, that is, a deleted task is marked as " "``deleted`` in the database so that the task will not appear in API " "responses, but the information associated with the task persists in the " "database." msgid "Expired tasks are now deleted." 
msgstr "Expired tasks are now deleted." msgid "" "Finally, there are no changes to the version 2.5 API in this release. All " "version 2.5 calls will work whether the new import functionality is enabled " "or not." msgstr "" "Finally, there are no changes to the version 2.5 API in this release. All " "version 2.5 calls will work whether the new import functionality is enabled " "or not." msgid "" "Fixed some important bugs around copy-image import method and importing " "image to multiple stores" msgstr "" "Fixed some important bugs around copy-image import method and importing " "image to multiple stores" msgid "" "Fixed some important bugs around multi-store imports and precaching images" msgstr "" "Fixed some important bugs around multi-store imports and precaching images" msgid "" "Fixing bug 1525915; image might be transitioning from active to queued by " "regular user by removing last location of image (or replacing locations with " "empty list). This allows user to re-upload data to the image breaking " "Glance's promise of image data immutability. From now on, last location " "cannot be removed and locations cannot be replaced with empty list." msgstr "" "Fixing bug 1525915; image might be transitioning from active to queued by " "regular user by removing last location of image (or replacing locations with " "empty list). This allows user to re-upload data to the image breaking " "Glance's promise of image data immutability. From now on, last location " "cannot be removed and locations cannot be replaced with empty list." msgid "For example, ``GET v2/images?visibility=community``" msgstr "For example, ``GET v2/images?visibility=community``" msgid "" "For example, a user is not allowed to add a location to an image in " "``saving`` status. Suppose a user decides to add a location anyway. It is " "possible that before the user's request is processed, the transmission of " "data being saved is completed and the image transitioned into ``active`` " "status, in which case the user's add location request will succeed. To the " "user, however, this success will appear anomalous because in most cases, an " "attempt to add a location to an image in ``saving`` status will fail." msgstr "" "For example, a user is not allowed to add a location to an image in " "``saving`` status. Suppose a user decides to add a location anyway. It is " "possible that before the user's request is processed, the transmission of " "data being saved is completed and the image transitioned into ``active`` " "status, in which case the user's add location request will succeed. To the " "user, however, this success will appear anomalous because in most cases, an " "attempt to add a location to an image in ``saving`` status will fail." msgid "" "For example, configuration options specifying counts, where a negative value " "was undefined, would have still accepted the supplied negative value. Such " "options will no longer accept negative values." msgstr "" "For example, configuration options specifying counts, where a negative value " "was undefined, would have still accepted the supplied negative value. Such " "options will no longer accept negative values." msgid "" "For example, the Liberty migration, which was version ``42`` under the old " "system, will now appear as ``liberty``. The Mitaka migrations ``43`` and " "``44`` appear as ``mitaka01`` and ``mitaka02``, respectively." msgstr "" "For example, the Liberty migration, which was version ``42`` under the old " "system, will now appear as ``liberty``. 
The Mitaka migrations ``43`` and " "``44`` appear as ``mitaka01`` and ``mitaka02``, respectively." msgid "" "For more information, see the Glance specification document `Actually " "Deprecate the Glance Registry `_." msgstr "" "For more information, see the Glance specification document `Actually " "Deprecate the Glance Registry `_." msgid "" "For the Newton release, this option will still be honored. However, it is " "important to update ``policy.json`` file for glance-api nodes. In " "particular, please consider updating the policies ``delete_image_location``, " "``get_image_location`` and ``set_image_location`` as per your requirements. " "As this is an advanced option and prone to expose some risks, please check " "the policies to ensure security and privacy of your cloud." msgstr "" "For the Newton release, this option will still be honoured. However, it is " "important to update ``policy.json`` file for glance-api nodes. In " "particular, please consider updating the policies ``delete_image_location``, " "``get_image_location`` and ``set_image_location`` as per your requirements. " "As this is an advanced option and prone to expose some risks, please check " "the policies to ensure security and privacy of your cloud." msgid "" "For the Pike release, the legacy Glare code has been removed from the Glance " "code repository and the legacy 'artifacts' and 'artifact_*' database tables " "are dropped from the Glance database. As the Artifacts service API was an " "EXPERIMENTAL API in Glance and has not used the Glance database since " "Mitaka, no provision is made for migrating data from the Glance database to " "the Glare database." msgstr "" "For the Pike release, the legacy Glare code has been removed from the Glance " "code repository and the legacy 'artifacts' and 'artifact_*' database tables " "are dropped from the Glance database. As the Artifacts service API was an " "EXPERIMENTAL API in Glance and has not used the Glance database since " "Mitaka, no provision is made for migrating data from the Glance database to " "the Glare database." msgid "" "For the details on what changed from the existing policy, please refer to " "the `RBAC new guidelines`_. We have implemented only phase-1 of the `RBAC " "new guidelines`_. Currently, scope checks and new defaults are disabled by " "default. You can enable them by switching the below config option in " "``glance.conf`` file::" msgstr "" "For the details on what changed from the existing policy, please refer to " "the `RBAC new guidelines`_. We have implemented only phase-1 of the `RBAC " "new guidelines`_. Currently, scope checks and new defaults are disabled by " "default. You can enable them by switching the below config option in " "``glance.conf`` file::" msgid "" "Formerly, it was possible to add members to an image whose visibility was " "``private``, thereby creating a \"shared\" image. In this release, an image " "must have a visibility of ``shared`` in order to accept member operations. " "Attempting to add a member to an image with a visibility of ``private`` will " "result in a `4xx response`_ containing an informative message." msgstr "" "Formerly, it was possible to add members to an image whose visibility was " "``private``, thereby creating a \"shared\" image. In this release, an image " "must have a visibility of ``shared`` in order to accept member operations. " "Attempting to add a member to an image with a visibility of ``private`` will " "result in a `4xx response`_ containing an informative message." 
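A short sketch of the member workflow under the visibility rules described above, using Images API v2 calls (IDs are placeholders): the image must be made ``shared`` before a member can be added::

    # Make the image shareable
    PATCH /v2/images/{image_id}
    [{"op": "replace", "path": "/visibility", "value": "shared"}]

    # Add the consumer's project as a member
    POST /v2/images/{image_id}/members
    {"member": "<consumer-project-id>"}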
msgid "" "Future releases will ignore this option and just follow the policy rules. It " "is recommended that this option is disabled for public endpoints and is used " "only internally for service-to-service communication." msgstr "" "Future releases will ignore this option and just follow the policy rules. It " "is recommended that this option is disabled for public endpoints and is used " "only internally for service-to-service communication." msgid "Glance API **CURRENT** ``minor`` version is now ``2.4``." msgstr "Glance API **CURRENT** ``minor`` version is now ``2.4``." msgid "Glance API ``minor`` version bumped to 2.4." msgstr "Glance API ``minor`` version bumped to 2.4." msgid "Glance Release Notes" msgstr "Glance Release Notes" msgid "" "Glance and Nova contain nearly identical digital signature modules. In order " "to better maintain and evolve this code and to eliminate the possibility " "that the modules diverge, we have replaced the digital signature module in " "Glance with the new ``cursive`` library." msgstr "" "Glance and Nova contain nearly identical digital signature modules. In order " "to better maintain and evolve this code and to eliminate the possibility " "that the modules diverge, we have replaced the digital signature module in " "Glance with the new ``cursive`` library." msgid "" "Glance had been accepting the Content-Range header for GET v2/images/" "{image_id}/file requests, contrary to RFC 7233. Following RFC 7233, Glance " "will now:" msgstr "" "Glance had been accepting the Content-Range header for GET v2/images/" "{image_id}/file requests, contrary to RFC 7233. Following RFC 7233, Glance " "will now:" msgid "" "Glance is now packaged with a WSGI script entrypoint, enabling it to be run " "as a WSGI application hosted by a performant web server. See `Running " "Glance in HTTPD `_ in the Glance documentation for details." msgstr "" "Glance is now packaged with a WSGI script entrypoint, enabling it to be run " "as a WSGI application hosted by a performant web server. See `Running " "Glance in HTTPD `_ in the Glance documentation for details." msgid "" "Glance no longer returns a 500 when 4 byte unicode characters are passed to " "the metadefs API." msgstr "" "Glance no longer returns a 500 when 4 byte Unicode characters are passed to " "the metadefs API." msgid "" "Glance now has per-tenant quota support based on Keystone unified limits for " "resources like image and staging storage, among other things. For more " "information about how to configure and use these quotas, refer to the " "relevant section of the `Administrator Guide `_." msgstr "" "Glance now has per-tenant quota support based on Keystone unified limits for " "resources like image and staging storage, among other things. For more " "information about how to configure and use these quotas, refer to the " "relevant section of the `Administrator Guide `_." msgid "" "Glance now prevents setting or modifying image properties that are within " "the ``os_glance`` reserved namespace. Previously, individual properties " "(such as ``os_glance_importing_to_stores``) were inconsistently disallowed, " "but now the entire namespace is enforced." msgstr "" "Glance now prevents setting or modifying image properties that are within " "the ``os_glance`` reserved namespace. Previously, individual properties " "(such as ``os_glance_importing_to_stores``) were inconsistently disallowed, " "but now the entire namespace is enforced." 
msgid "" "Glance now provides more granular RBAC access to the images API via default " "personas. This work is marked as experimental in Wallaby, and will be " "supported in a future release." msgstr "" "Glance now provides more granular RBAC access to the images API via default " "personas. This work is marked as experimental in Wallaby, and will be " "supported in a future release." msgid "" "Glance now ships experimental policies that support read-only image " "permissions. Users with the `reader` role on a project will be able to view " "generic image data, without the ability to make writeable changes using the " "images API. Please review the features section above for more information on " "enabling this functionality." msgstr "" "Glance now ships experimental policies that support read-only image " "permissions. Users with the `reader` role on a project will be able to view " "generic image data, without the ability to make writeable changes using the " "images API. Please review the features section above for more information on " "enabling this functionality." msgid "" "Glance now supports the ``glance-direct`` import method without needing " "shared storage common to all API workers. By telling each API worker the URL " "by which it can be reached directly (from the other workers), a shared " "staging directory can be avoided while still allowing users to upload their " "data for import. See the ``worker_self_reference_url`` config option for " "more details, as well as the `Interoperable Image Import `_ docs." msgstr "" "Glance now supports the ``glance-direct`` import method without needing " "shared storage common to all API workers. By telling each API worker the URL " "by which it can be reached directly (from the other workers), a shared " "staging directory can be avoided while still allowing users to upload their " "data for import. See the ``worker_self_reference_url`` config option for " "more details, as well as the `Interoperable Image Import `_ docs." msgid "" "Glance now uses the `python 'cryptography' module`_ instead of the " "'pycrypto' module." msgstr "" "Glance now uses the `python 'cryptography' module`_ instead of the " "'pycrypto' module." msgid "Glance services can now run on Windows." msgstr "Glance services can now run on Windows." msgid "" "Glance to glance image import plugin. With this release users can import an " "image from an other glance server from an other opensatck region. The two " "glance services must use the same keystone service. The feature use the same " "keystone authentication token on both glance services and copy by default " "container_format, disk_format and customizable properties from source image " "``['hw_', 'trait:', 'os_distro', 'os_secure_boot', 'os_type']``" msgstr "" "Glance to glance image import plugin. With this release, users can import an " "image from another Glance server from another OpenStack region. The two " "glance services must use the same keystone service. The feature use the same " "Keystone authentication token on both Glance services and copy by default " "container_format, disk_format and customizable properties from source image " "``['hw_', 'trait:', 'os_distro', 'os_secure_boot', 'os_type']``" msgid "" "Glance uses the ``cursive`` library's functionality to verify digital " "signatures. 
To familiarize yourself with this new dependency and see the " "list of transitive dependencies visit http://git.openstack.org/cgit/" "openstack/cursive" msgstr "" "Glance uses the ``cursive`` library's functionality to verify digital " "signatures. To familiarise yourself with this new dependency and see the " "list of transitive dependencies visit http://git.openstack.org/cgit/" "openstack/cursive" msgid "" "Glance's default policies for metadef APIs now support member and reader " "roles for Secure RBAC project persona. Administrative operations like " "create, delete and update are still protected using the `admin` role on a " "project. Administrative actions will be updated in the future to consume " "system-scope." msgstr "" "Glance's default policies for metadef APIs now support member and reader " "roles for Secure RBAC project persona. Administrative operations like " "create, delete and update are still protected using the `admin` role on a " "project. Administrative actions will be updated in the future to consume " "system-scope." msgid "" "Glance's default policies now use the `member` role on projects to protect " "writeable and readable image actions. Support was also added for read-only " "access to image resources when the `reader` role is granted to users on a " "project. Administrative operations, like creating public images, is still " "protected using the `admin` role on a project. Administrative actions will " "be updated in the future to consume system-scope." msgstr "" "Glance's default policies now use the `member` role on projects to protect " "writeable and readable image actions. Support was also added for read-only " "access to image resources when the `reader` role is granted to users on a " "project. Administrative operations, like creating public images, is still " "protected using the `admin` role on a project. Administrative actions will " "be updated in the future to consume system-scope." msgid "" "HTTP Response Code 409 (Conflict) will be returned in response to an attempt " "to remove an image location when the image status is not ``active``" msgstr "" "HTTP Response Code 409 (Conflict) will be returned in response to an attempt " "to remove an image location when the image status is not ``active``" msgid "" "HTTP Response Code 409 (Conflict) will be returned in response to an attempt " "to replace an image location when the image status is not ``active`` or " "``queued``" msgstr "" "HTTP Response Code 409 (Conflict) will be returned in response to an attempt " "to replace an image location when the image status is not ``active`` or " "``queued``" msgid "" "Here is a list of other important bugs that have been fixed (or partially " "fixed) along with their descriptions." msgstr "" "Here is a list of other important bugs that have been fixed (or partially " "fixed) along with their descriptions." msgid "Here is a list of possible return codes:" msgstr "Here is a list of possible return codes:" msgid "" "If ``enable_image_import`` is set **False**, requests to the v2 endpoint for " "URIs defined only in v2.6 will return 404 (Not Found) with a message in the " "response body stating \"Image import is not supported at this site.\" " "Additionally, the image-create response will not contain the \"OpenStack-" "image-import-methods\" header." 
msgstr "" "If ``enable_image_import`` is set **False**, requests to the v2 endpoint for " "URIs defined only in v2.6 will return 404 (Not Found) with a message in the " "response body stating \"Image import is not supported at this site.\" " "Additionally, the image-create response will not contain the \"OpenStack-" "image-import-methods\" header." msgid "" "If an image has a visiblity of 'private' when viewed in the v2 API, then " "that image will not accept members in the v1 API. If a user wants to share " "such an image, the user can:" msgstr "" "If an image has a visibility of 'private' when viewed in the v2 API, then " "that image will not accept members in the v1 API. If a user wants to share " "such an image, the user can:" msgid "" "If configured to work in daemon mode, the Scrubber will log an error message " "at level critical, but will not exit the process." msgstr "" "If configured to work in daemon mode, the Scrubber will log an error message " "at level critical, but will not exit the process." msgid "" "If configured to work in non-daemon mode, the Scrubber will log an error " "message at level critical and exit with status one." msgstr "" "If configured to work in non-daemon mode, the Scrubber will log an error " "message at level critical and exit with status one." msgid "" "If signature verification fails upon creation, the image data will stay in " "the backend even though the image goes to 'killed' state. The data remains " "even if a user deletes the killed image record." msgstr "" "If signature verification fails upon creation, the image data will stay in " "the backend even though the image goes to 'killed' state. The data remains " "even if a user deletes the killed image record." msgid "" "If the ``cinder_encryption_key_deletion_policy`` image property is missing " "or has any value other than ``on_image_deletion``, Glance will **not** " "attempt to delete the key whose identifier is the value of " "``cinder_encryption_key_id``." msgstr "" "If the ``cinder_encryption_key_deletion_policy`` image property is missing " "or has any value other than ``on_image_deletion``, Glance will **not** " "attempt to delete the key whose identifier is the value of " "``cinder_encryption_key_id``." msgid "" "If the existing ``policy.json`` file relies on the ``default`` rule for some " "policies (i.e. not all policies are explicitly specified in the file) then " "the ``default`` rule must be explicitly set (e.g. to ``\"role:admin\"``) in " "the file. The new default value for the ``default`` rule is ``\"\"``, " "whereas since the Queens release it has been ``\"role:admin\"`` (prior to " "Queens it was ``\"@\"``, which allows everything). After upgrading to this " "release, the policy file should be replaced by one that overrides only " "policies that need to be different from the defaults, without relying on the " "``default`` rule." msgstr "" "If the existing ``policy.json`` file relies on the ``default`` rule for some " "policies (i.e. not all policies are explicitly specified in the file) then " "the ``default`` rule must be explicitly set (e.g. to ``\"role:admin\"``) in " "the file. The new default value for the ``default`` rule is ``\"\"``, " "whereas since the Queens release it has been ``\"role:admin\"`` (prior to " "Queens it was ``\"@\"``, which allows everything). After upgrading to this " "release, the policy file should be replaced by one that overrides only " "policies that need to be different from the defaults, without relying on the " "``default`` rule." 
msgid "" "If upgrade is conducted from PY27 where ssl connections has been terminated " "into glance-api, the termination needs to happen externally from now on." msgstr "" "If upgrade is conducted from PY27 where SSL connections has been terminated " "into glance-api, the termination needs to happen externally from now on." msgid "" "If you are using the multistore feature, you must define configuration " "options for ``os_glance_tasks_store`` and ``os_glance_staging_store`` in the " "``glance-scrubber.conf`` file. See the \"Reserved Stores\" section of the " "\"Multi Store Support\" chapter of the Glance Administration Guide for more " "information." msgstr "" "If you are using the multistore feature, you must define configuration " "options for ``os_glance_tasks_store`` and ``os_glance_staging_store`` in the " "``glance-scrubber.conf`` file. See the \"Reserved Stores\" section of the " "\"Multi Store Support\" chapter of the Glance Administration Guide for more " "information." msgid "" "If you choose to use the Barbican secret identified by the value of " "``cinder_encryption_key_id`` for any other purpose, you risk data loss." msgstr "" "If you choose to use the Barbican secret identified by the value of " "``cinder_encryption_key_id`` for any other purpose, you risk data loss." msgid "" "If you use the Glance multiple stores feature, introduced on an experimental " "basis in Rocky and now fully supported in the Train release, then you *must* " "use backing stores instead of ``work_dir`` and ``node_staging_uri`` for " "Glance's temporary storage **beginning right now with the current " "release**. See the \"Reserved Stores\" section of the \"Multi Store Support" "\" chapter of the Glance Administration Guide for more information." msgstr "" "If you use the Glance multiple stores feature, introduced on an experimental " "basis in Rocky and now fully supported in the Train release, then you *must* " "use backing stores instead of ``work_dir`` and ``node_staging_uri`` for " "Glance's temporary storage **beginning right now with the current " "release**. See the \"Reserved Stores\" section of the \"Multi Store Support" "\" chapter of the Glance Administration Guide for more information." msgid "" "If you want to disable them then modify the below config options value in " "``glance-api.conf`` file::" msgstr "" "If you want to disable them then modify the below config options value in " "``glance-api.conf`` file::" msgid "" "If you were previously aware of this option and were actually using it, we " "apologize for the inconvenience its removal will cause, but overall it will " "be better for everyone if policy configuration is confined to the policy " "configuration file and this backdoor is eliminated. The migration path is " "to explictly mention the role you configured for this option in appropriate " "places in your policy configuration file." msgstr "" "If you were previously aware of this option and were actually using it, we " "apologize for the inconvenience its removal will cause, but overall it will " "be better for everyone if policy configuration is confined to the policy " "configuration file and this backdoor is eliminated. The migration path is " "to explicitly mention the role you configured for this option in appropriate " "places in your policy configuration file." 
msgid "" "If you wish to enable the EXPERIMENTAL version 2.6 API that contains the new " "interoperable image import functionality, set the configuration option " "``enable_image_import`` to True in the glance-api.conf file. The default " "value for this option is False." msgstr "" "If you wish to enable the EXPERIMENTAL version 2.6 API that contains the new " "interoperable image import functionality, set the configuration option " "``enable_image_import`` to True in the glance-api.conf file. The default " "value for this option is False." msgid "" "If you wish to use the backend store feature now, please see the \"Reserved " "Stores\" section of the \"Multi Store Support\" chapter of the Glance " "Administration Guide for configuration information." msgstr "" "If you wish to use the backend store feature now, please see the \"Reserved " "Stores\" section of the \"Multi Store Support\" chapter of the Glance " "Administration Guide for configuration information." msgid "Image 'visibility' changes." msgstr "Image 'visibility' changes." msgid "Image Import API failure with PY35 has been fixed." msgstr "Image Import API failure with PY35 has been fixed." msgid "" "Image Import call accepting all implemented methods instead of configured " "ones has been rectified. It only accepts import calls for the methods listed " "in the config option 'enabled_import_methods'" msgstr "" "Image Import call accepting all implemented methods instead of configured " "ones has been rectified. It only accepts import calls for the methods listed " "in the config option 'enabled_import_methods'" msgid "" "Image Import method 'web-download' failure when 'node_staging_uri' ends with " "'/' has been fixed." msgstr "" "Image Import method 'web-download' failure when 'node_staging_uri' ends with " "'/' has been fixed." msgid "" "Image location updates to an image which is not in ``active`` or ``queued`` " "status can introduce race conditions and security issues and hence a bad " "experience for users and operators. As a result, we have restricted image " "location updates in this release. Users will now observe the following:" msgstr "" "Image location updates to an image which is not in ``active`` or ``queued`` " "status can introduce race conditions and security issues and hence a bad " "experience for users and operators. As a result, we have restricted image " "location updates in this release. Users will now observe the following:" msgid "Image visibility is changed using the image update (PATCH) call." msgstr "Image visibility is changed using the image update (PATCH) call." msgid "Image visibility may be specified at the time of image creation." msgstr "Image visibility may be specified at the time of image creation." msgid "" "Images currently with 'private' visibility (that is, images for which " "'is_public' is False in the database) **and** that have image members, will " "have their visibility set to 'shared'." msgstr "" "Images currently with 'private' visibility (that is, images for which " "'is_public' is False in the database) **and** that have image members, will " "have their visibility set to 'shared'." msgid "" "Images in the qcow2 format with an external data file are now rejected from " "glance because such images could be used in an exploit to expose host " "information. See `Bug #2059809 `_ for details." msgstr "" "Images in the qcow2 format with an external data file are now rejected from " "glance because such images could be used in an exploit to expose host " "information. 
See `Bug #2059809 `_ for details." msgid "" "Impact of the Ocata visibility changes on end users of the Images API v2" msgstr "" "Impact of the Ocata visibility changes on end users of the Images API v2" msgid "Impact of the Ocata visibility changes on the Images API v1" msgstr "Impact of the Ocata visibility changes on the Images API v1" msgid "" "Implement the ability to filter images by the properties `id`, `name`, " "`status`,`container_format`, `disk_format` using the 'in' operator between " "the values. Following the pattern of existing filters, new filters are " "specified as query parameters using the field to filter as the key and the " "filter criteria as the value in the parameter. Filtering based on the " "principle of full compliance with the template, for example 'name = in:deb' " "does not match 'debian'. Changes apply exclusively to the API v2 Image " "entity listings An example of an acceptance criteria using the 'in' operator " "for name ?name=in:name1,name2,name3. These filters were added using syntax " "that conforms to the latest guidelines from the OpenStack API Working Group." msgstr "" "Implement the ability to filter images by the properties `id`, `name`, " "`status`, `container_format`, `disk_format` using the 'in' operator between " "the values. Following the pattern of existing filters, new filters are " "specified as query parameters using the field to filter as the key and the " "filter criteria as the value in the parameter. Filtering is based on the " "principle of full compliance with the template; for example, 'name = in:deb' " "does not match 'debian'. Changes apply exclusively to the API v2 Image " "entity listings. An example of an acceptance criterion using the 'in' " "operator for name is ?name=in:name1,name2,name3. These filters were added " "using syntax that conforms to the latest guidelines from the OpenStack API " "Working Group." msgid "Implemented Secure RBAC project scope for metadef APIs" msgstr "Implemented Secure RBAC project scope for metadef APIs" msgid "" "Implemented re-authentication with trusts when updating image status in " "registry after image upload. When long-running image upload takes some a lot " "of time (more than token expiration time) glance uses trusts to receive new " "token and update image status in registry. It allows users to upload big " "size images without increasing token expiration time." msgstr "" "Implemented re-authentication with trusts when updating the image status in " "the registry after image upload. When a long-running image upload takes a " "lot of time (more than the token expiration time), glance uses trusts to " "receive a new token and update the image status in the registry. It allows " "users to upload large images without increasing the token expiration time." msgid "Improved configuration option descriptions and handling." msgstr "Improved configuration option descriptions and handling." msgid "Improved performance of rbd store chunk upload" msgstr "Improved performance of rbd store chunk upload" msgid "" "In Newton, the majority of the signature verification code was removed from " "Glance. ``cursive`` has been added to Glance as a dependency and will be " "installed by default." msgstr "" "In Newton, the majority of the signature verification code was removed from " "Glance. ``cursive`` has been added to Glance as a dependency and will be " "installed by default." msgid "" "In accord with current OpenStack policy, Glance log messages are `no longer " "translated`_." msgstr "" "In accord with current OpenStack policy, Glance log messages are `no longer " "translated`_." msgid "" "In case of ``http`` store if bad value is passed for ``os_hash_value`` in " "validation data then task fails which is expected but it stores location of " "the image which is wrong, that needs to be popped out. The location doesn't " "get deleted because deletion of locatio is not allowed for ``http`` store. " "Here image needs to be deleted as it is of no use." msgstr "" "In the case of the ``http`` store, if a bad value is passed for " "``os_hash_value`` in the validation data then the task fails, which is " "expected, but it stores the location of the image, which is wrong and needs " "to be popped out. The location doesn't get deleted because deletion of " "locations is not allowed for the ``http`` store. Here the image needs to be " "deleted as it is of no use." msgid "" "In order to check the current state of your database upgrades, you may run " "the command ``glance-manage db check``. This will inform you of any " "outstanding actions you have left to take." msgstr "" "In order to check the current state of your database upgrades, you may run " "the command ``glance-manage db check``. This will inform you of any " "outstanding actions you have left to take." msgid "" "In order to preserve backward compatibilty with the current sharing " "workflow, the default visibility of an image in Ocata is 'shared'. " "Consistent with pre-Ocata behavior, this will allow the image to accept " "member operations without first updating the visibility of the image. (Keep " "in mind that an image with visibility 'shared' but having no members is not " "actually accessible to anyone other than the image owner, so this is not in " "itself a security problem.)" msgstr "" "In order to preserve backward compatibility with the current sharing " "workflow, the default visibility of an image in Ocata is 'shared'. " "Consistent with pre-Ocata behaviour, this will allow the image to accept " "member operations without first updating the visibility of the image. (Keep " "in mind that an image with visibility 'shared' but having no members is not " "actually accessible to anyone other than the image owner, so this is not in " "itself a security problem.)" msgid "" "In order to prevent users spamming other users' image-list response, " "community images are not included in the image-list response unless " "specifically requested by a user." msgstr "" "In order to prevent users spamming other users' image-list response, " "community images are not included in the image-list response unless " "specifically requested by a user." msgid "" "In the Newton release, the Glare code was relocated into its own `Glare`_ " "project repository. Also in the Newton release, Glare ran an EXPERIMENTAL " "Artifacts API versioned as ``v1.0`` on its own endpoint and used its own " "database." msgstr "" "In the Newton release, the Glare code was relocated into its own `Glare`_ " "project repository. Also in the Newton release, Glare ran an EXPERIMENTAL " "Artifacts API versioned as ``v1.0`` on its own endpoint and used its own " "database." msgid "" "In the v1 API, images have an ``is_public`` field (but no ``visibility`` " "field). Images for which ``is_public`` is True are the equivalent of images " "with 'public' visibility in the v2 API. Images for which ``is_public`` is " "false are the equivalent of v2 'shared' images if they have members, or the " "equivalent of v2 'private' images if they have no members." 
msgstr "" "In the v1 API, images have an ``is_public`` field (but no ``visibility`` " "field). Images for which ``is_public`` is True are the equivalent of images " "with 'public' visibility in the v2 API. Images for which ``is_public`` is " "false are the equivalent of v2 'shared' images if they have members, or the " "equivalent of v2 'private' images if they have no members." msgid "" "In this cycle Glance enabled the API policies (RBAC) new defaults and scope " "by default and removed the deprecated ``enforce_secure_rbac`` option which " "is no longer needed after switching to new defaults. The Default value of " "config options ``[oslo_policy] enforce_scope`` and ``[oslo_policy] " "oslo_policy.enforce_new_defaults`` have been changed to ``True``. Old " "policies are still there but they are disabled by default." msgstr "" "In this cycle Glance enabled the API policies (RBAC) new defaults and scope " "by default and removed the deprecated ``enforce_secure_rbac`` option which " "is no longer needed after switching to new defaults. The Default value of " "config options ``[oslo_policy] enforce_scope`` and ``[oslo_policy] " "oslo_policy.enforce_new_defaults`` have been changed to ``True``. Old " "policies are still there but they are disabled by default." msgid "" "In this release, the default value of ``admin_role`` has been defined as " "``__NOT_A_ROLE_07697c71e6174332989d3d5f2a7d2e7c_NOT_A_ROLE__``. This " "effectively makes it inoperable (unless your Keystone administrator has " "actually created such a role and assigned it to someone, which is unlikely " "but possible, so you should check). If your local policy tests (you have " "some, right?) indicate that your Glance policies no longer function as " "expected, then you have been relying on the ``admin_role`` configuration " "option and need to revise your policy file. (A short term fix would be to " "set the ``admin_role`` option back to ``admin``, but keep in mind that it " "*is* a short-term fix, because this configuration option is deprecated and " "subject to removal.)" msgstr "" "In this release, the default value of ``admin_role`` has been defined as " "``__NOT_A_ROLE_07697c71e6174332989d3d5f2a7d2e7c_NOT_A_ROLE__``. This " "effectively makes it inoperable (unless your Keystone administrator has " "actually created such a role and assigned it to someone, which is unlikely " "but possible, so you should check). If your local policy tests (you have " "some, right?) indicate that your Glance policies no longer function as " "expected, then you have been relying on the ``admin_role`` configuration " "option and need to revise your policy file. (A short term fix would be to " "set the ``admin_role`` option back to ``admin``, but keep in mind that it " "*is* a short-term fix, because this configuration option is deprecated and " "subject to removal.)" msgid "" "In this release, the stores used *must* be the filesystem store type. Our " "goal is that in a future release, operators will be able to configure a " "store type from other selected drivers as well. In Train, however, each of " "these *must* be a filesystem store." msgstr "" "In this release, the stores used *must* be the filesystem store type. Our " "goal is that in a future release, operators will be able to configure a " "store type from other selected drivers as well. In Train, however, each of " "these *must* be a filesystem store." 
msgid "" "In this release, the use of stores (instead of configuring the path " "directly) is optional, but it will become mandatory for the 'U' release." msgstr "" "In this release, the use of stores (instead of configuring the path " "directly) is optional, but it will become mandatory for the 'U' release." msgid "" "Include a ``Content-Range`` header upon successful delivery of the requested " "partial content." msgstr "" "Include a ``Content-Range`` header upon successful delivery of the requested " "partial content." msgid "" "It is now possible for Glance to use backends accessed via the glance_store " "library for the temporary storage of data that previously required access to " "the local filesystem. Please note the following:" msgstr "" "It is now possible for Glance to use backends accessed via the glance_store " "library for the temporary storage of data that previously required access to " "the local filesystem. Please note the following:" msgid "" "It must be noted that the configuration options that take integer values now " "have a strict range defined with ``min`` and/or ``max`` boundaries where " "appropriate." msgstr "" "It must be noted that the configuration options that take integer values now " "have a strict range defined with ``min`` and/or ``max`` boundaries where " "appropriate." msgid "Known Issues" msgstr "Known Issues" msgid "" "Latest release of ``glance_store`` library (used in the **Newton** release " "of Glance) will include fix for the ``glance_store`` bug 1619487." msgstr "" "Latest release of ``glance_store`` library (used in the **Newton** release " "of Glance) will include fix for the ``glance_store`` bug 1619487." msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "" "Location metadata key ``backend`` has been changed to ``store``. Any " "environment that might be using the old ``backend`` key will have the key " "name changed through lazy update upon access." msgstr "" "Location metadata key ``backend`` has been changed to ``store``. Any " "environment that might be using the old ``backend`` key will have the key " "name changed through lazy update upon access." msgid "" "Location updates for images are now restricted to images in ``active`` or " "``queued`` status. Please refer to the \"Bug Fixes\" section for more " "information." msgstr "" "Location updates for images are now restricted to images in ``active`` or " "``queued`` status. Please refer to the \"Bug Fixes\" section for more " "information." msgid "" "Maintaining two different ways to configure, enable and/or disable a feature " "is painful for developers and operators, so the less granular means of " "controlling this feature will be eliminated in the **Ocata** release." msgstr "" "Maintaining two different ways to configure, enable and/or disable a feature " "is painful for developers and operators, so the less granular means of " "controlling this feature will be eliminated in the **Ocata** release." msgid "" "Manual use of the ``cinder_encryption_key_*`` properties is *not* " "recommended." msgstr "" "Manual use of the ``cinder_encryption_key_*`` properties is *not* " "recommended." msgid "" "Metadata definitions previously associated with OS::Nova::Instance have been " "changed to be associated with OS::Nova::Server in order to align with Heat " "and Searchlight." msgstr "" "Metadata definitions previously associated with OS::Nova::Instance have been " "changed to be associated with OS::Nova::Server in order to align with Heat " "and Searchlight." 
msgid "" "Metadata definitions previously associated with OS::Nova::Instance have been " "changed to be associated with OS::Nova::Server in order to align with Heat " "and Searchlight. You may either upgrade them using glance-manage db " "load_metadefs [path] [merge] [prefer_new] or glance-manage db upgrade 44." msgstr "" "Metadata definitions previously associated with OS::Nova::Instance have been " "changed to be associated with OS::Nova::Server in order to align with Heat " "and Searchlight. You may either upgrade them using glance-manage db " "load_metadefs [path] [merge] [prefer_new] or glance-manage db upgrade 44." msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "Moved policy enforcement in API layer" msgstr "Moved policy enforcement in API layer" msgid "" "Multiple back-ends: No default back-end gets assigned when adding location " "via the locations api and not defining back-end ID on the call. This might " "affect specially Nova snapshots utilizing rdb back-end." msgstr "" "Multiple back-ends: No default back-end gets assigned when adding location " "via the locations API and not defining back-end ID on the call. This might " "affect specially Nova snapshots utilising RDB back-end." msgid "" "Negotiation of the 'Accept-Language' header now follows the \"Lookup\" " "matching scheme described in `RFC 4647, section 3.4 `_. The \"Lookup\" scheme is one of the " "algorithms suggested in `RFC 7231, section 5.3.5 `_. (This is due to a change in an " "underlying library, which previously used a matching scheme that did not " "conform to `RFC 7231 `_.)" msgstr "" "Negotiation of the 'Accept-Language' header now follows the \"Lookup\" " "matching scheme described in `RFC 4647, section 3.4 `_. The \"Lookup\" scheme is one of the " "algorithms suggested in `RFC 7231, section 5.3.5 `_. (This is due to a change in an " "underlying library, which previously used a matching scheme that did not " "conform to `RFC 7231 `_.)" msgid "New Features" msgstr "New Features" msgid "" "New Interoperable Image Import plugin has been introduced to address the use " "case of providing compressed images either through 'web-download' or to " "optimize the network utilization between the client and Glance." msgstr "" "New Interoperable Image Import plugin has been introduced to address the use " "case of providing compressed images either through 'web-download' or to " "optimise the network utilisation between the client and Glance." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Note that in either case, when dealing with an image that has 'private' " "visibility in the v2 API, there is a safeguard against a user " "unintentionally adding a member to an image and exposing data. The " "safeguard is that you must perform an additional image update operation in " "either the v1 or v2 API before you can expose it to other users." msgstr "" "Note that in either case, when dealing with an image that has 'private' " "visibility in the v2 API, there is a safeguard against a user " "unintentionally adding a member to an image and exposing data. The " "safeguard is that you must perform an additional image update operation in " "either the v1 or v2 API before you can expose it to other users." msgid "" "Note that such images will have to have their visibility updated to 'shared' " "before they will accept members." msgstr "" "Note that such images will have to have their visibility updated to 'shared' " "before they will accept members." 
msgid "" "Note that the plugin applies *only* to images imported via the " "`interoperable image import process`_. Thus images whose data is set using " "the `image data upload`_ call will *not* be processed by the plugin and " "hence will not have properties injected. You can force end users to use the " "interoperable image import process by restricting the data upload call, " "which is governed by the ``upload_image`` policy in the Glance ``policy." "json`` file. See the documentation for more information." msgstr "" "Note that the plugin applies *only* to images imported via the " "`interoperable image import process`_. Thus images whose data is set using " "the `image data upload`_ call will *not* be processed by the plugin and " "hence will not have properties injected. You can force end users to use the " "interoperable image import process by restricting the data upload call, " "which is governed by the ``upload_image`` policy in the Glance ``policy." "json`` file. See the documentation for more information." msgid "" "Note that there are race conditions associated with adding a location to an " "image in the ``active``, ``queued``, ``saving``, or ``deactivated`` status. " "Because these are non-terminal image statuses, it is possible that when a " "user attempts to add a location, a status transition could occur that might " "block the **add** (or might appear to allow an add that should not be " "allowed)." msgstr "" "Note that there are race conditions associated with adding a location to an " "image in the ``active``, ``queued``, ``saving``, or ``deactivated`` status. " "Because these are non-terminal image statuses, it is possible that when a " "user attempts to add a location, a status transition could occur that might " "block the **add** (or might appear to allow an add that should not be " "allowed)." msgid "Note the code name for the \"ceph\" driver is ``rbd``." msgstr "Note the code name for the \"Ceph\" driver is ``rbd``." msgid "Note the ordering of the options within a store is not alphabetical." msgstr "Note the ordering of the options within a store is not alphabetical." msgid "Note: This is not a change. It's simply mentioned for completeness." msgstr "Note: This is not a change. It's simply mentioned for completeness." msgid "" "Now operator can control the scope and new defaults flag with the below " "config options in ``glance-api.conf`` file::" msgstr "" "Now operator can control the scope and new defaults flag with the below " "config options in ``glance-api.conf`` file::" msgid "" "OSprofiler support requires passing of trace information between various " "OpenStack services. This information is signed by one of HMAC keys, which we " "historically defined in glance-api-paste.ini and glance-registry-paste.ini " "files (together with enabled option, that in fact was duplicated in the " "corresponding configuration files). OSprofiler 0.3.1 and higher supports " "passing this information via configuration files, therefore it's recommended " "to modify the ``[filter:osprofiler]`` section in \\*-paste.ini to look like " "``paste.filter_factor = osprofiler.web:WsgiMiddleware.factory`` and set the " "``hmac_keys`` option in the glance-\\*.conf files." msgstr "" "OSprofiler support requires passing of trace information between various " "OpenStack services. 
This information is signed by one of the HMAC keys, which we " "historically defined in the glance-api-paste.ini and glance-registry-paste." "ini files (together with the enabled option, which in fact was duplicated in " "the corresponding configuration files). OSprofiler 0.3.1 and higher supports " "passing this information via configuration files, therefore it's recommended " "to modify the ``[filter:osprofiler]`` section in \\*-paste.ini to look like " "``paste.filter_factory = osprofiler.web:WsgiMiddleware.factory`` and set the " "``hmac_keys`` option in the glance-\\*.conf files." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "On this release requirements.txt were synced from global-requirements." msgstr "In this release, requirements.txt was synced from global-requirements." msgid "On top of testing and documentation fixes following bugs were addressed" msgstr "" "On top of testing and documentation fixes, the following bugs were addressed" msgid "" "OpenStack deployments, packagers, and deployment projects which provided " "Glare should have begun to consume Glare from its own `Glare`_ respository " "during the Newton and Ocata releases. With the Pike release, it is no " "longer possible to consume Glare code from the Glance repository." msgstr "" "OpenStack deployments, packagers, and deployment projects which provided " "Glare should have begun to consume Glare from its own `Glare`_ repository " "during the Newton and Ocata releases. With the Pike release, it is no " "longer possible to consume Glare code from the Glance repository." msgid "" "Operators who use property protections with the " "``property_protection_rule_format`` set to ``policies`` must still define " "the policy rules used for property protections in a policy file. The " "content of the file may be JSON or YAML. Additionally, we suggest that the " "absolute pathname of this file be set as the value of ``policy_file`` in the " "``[oslo_policy]`` section of the ``glance-api.conf`` file." msgstr "" "Operators who use property protections with the " "``property_protection_rule_format`` set to ``policies`` must still define " "the policy rules used for property protections in a policy file. The " "content of the file may be JSON or YAML. Additionally, we suggest that the " "absolute pathname of this file be set as the value of ``policy_file`` in the " "``[oslo_policy]`` section of the ``glance-api.conf`` file." msgid "" "Options where a negative value was previously defined (for example, -1 to " "mean unlimited) will remain unaffected by this change." msgstr "" "Options where a negative value was previously defined (for example, -1 to " "mean unlimited) will remain unaffected by this change." msgid "Other Notes" msgstr "Other Notes" msgid "" "Periodic job to prefetch image(s) into cache has been removed from the " "glance api service with config option ``cache_prefetcher_interval`` which " "was added as an interval for the same periodic job also been removed as " "image(s) will be immediately queued for caching." msgstr "" "The periodic job to prefetch image(s) into the cache has been removed from " "the Glance API service. The config option ``cache_prefetcher_interval``, " "which was added as an interval for that periodic job, has also been removed, " "as image(s) will now be immediately queued for caching." msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Please keep a watch on the Glance release notes and the glance-specs " "repository to stay informed about developments on this issue." msgstr "" "Please keep a watch on the Glance release notes and the glance-specs " "repository to stay informed about developments on this issue." msgid "" "Please keep in mind that as version 2.8 of the Image Service API is " "EXPERIMENTAL, we reserve the right to make modifications to these aspects of " "the API should user feedback indicate that a change is required." msgstr "" "Please keep in mind that as version 2.8 of the Image Service API is " "EXPERIMENTAL, we reserve the right to make modifications to these aspects of " "the API should user feedback indicate that a change is required." msgid "" "Please note a change in the Scrubber's behavior in case of job fetching " "errors:" msgstr "" "Please note a change in the Scrubber's behaviour in case of job fetching " "errors:" msgid "" "Please note that not all Glance storage backends support partial downloads. " "A Range request to a Glance server with such a backend will result in the " "entire image content being delivered despite the 206 response code." msgstr "" "Please note that not all Glance storage backends support partial downloads. " "A Range request to a Glance server with such a backend will result in the " "entire image content being delivered despite the 206 response code." msgid "" "Please see the Upgrades section of this document and the \"Multi Store " "Support\" chapter of the Glance Administration Guide for more information." msgstr "" "Please see the Upgrades section of this document and the \"Multi Store " "Support\" chapter of the Glance Administration Guide for more information." msgid "" "Policy defaults are now defined in code, as they already were in other " "OpenStack services. After upgrading there is no need to provide a ``policy." "json`` file (and you should not do so) unless you want to override the " "default policies, and only policies you want to override need be mentioned " "in the file. You should no longer rely on the ``default`` rule, and " "especially not the default value of the rule (which has been relaxed), to " "assign a non-default policy to rules not explicitly specified in the policy " "file." msgstr "" "Policy defaults are now defined in code, as they already were in other " "OpenStack services. After upgrading there is no need to provide a ``policy." "json`` file (and you should not do so) unless you want to override the " "default policies, and only policies you want to override need be mentioned " "in the file. You should no longer rely on the ``default`` rule, and " "especially not the default value of the rule (which has been relaxed), to " "assign a non-default policy to rules not explicitly specified in the policy " "file." msgid "" "Policy enforcement for several Metadata Definition delete APIs are added in " "this release. The following actions are enforced and added to the policy." "json:" msgstr "" "Policy enforcement for several Metadata Definition delete APIs is added in " "this release. The following actions are enforced and added to the policy." "json:" msgid "Prelude" msgstr "Prelude" msgid "" "Prior to Ocata, an image with 'private' visibility could become shared by " "adding members to it, though its visibility remained 'private'. In order to " "make the visibility of images more clear, in Ocata the following changes are " "introduced:" msgstr "" "Prior to Ocata, an image with 'private' visibility could become shared by " "adding members to it, though its visibility remained 'private'. 
In order to " "make the visibility of images more clear, in Ocata the following changes are " "introduced:" msgid "" "Prior to Ocata, the Glance database did not have a 'visibility' column, but " "instead used a boolean 'is_public' column, which was translated into " "'public' or 'private' visibility in the Images API v2 image response. As " "part of the upgrade to Ocata, a 'visibility' column is introduced into the " "images table. It will be populated as follows" msgstr "" "Prior to Ocata, the Glance database did not have a 'visibility' column, but " "instead used a boolean 'is_public' column, which was translated into " "'public' or 'private' visibility in the Images API v2 image response. As " "part of the upgrade to Ocata, a 'visibility' column is introduced into the " "images table. It will be populated as follows" msgid "" "Python 2.7 support has been dropped. Last release of Glance to support py2.7 " "is OpenStack Train (Glance 19.x). The minimum version of Python now " "supported by Glance is Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of Glance to support py2.7 " "is OpenStack Train (Glance 19.x). The minimum version of Python now " "supported by Glance is Python 3.6." msgid "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgstr "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Re-introducing cache-manage utility. In Rocky the Images API v1 dependent " "glance-cache-manage utility was removed while removing the v1 endpoints. " "Stein release introduces the command refactored to utilize the Images API " "version 2." msgstr "" "Re-introducing cache-manage utility. In Rocky the Images API v1 dependent " "glance-cache-manage utility was removed while removing the v1 endpoints. " "Stein release introduces the command refactored to utilize the Images API " "version 2." msgid "" "Removed the deprecated 'enable_image_import' config option. Image import " "will be always enabled from this release onwards as designed." msgstr "" "Removed the deprecated 'enable_image_import' config option. Image import " "will be always enabled from this release onwards as designed." msgid "" "Removed the deprecated 'secure_proxy_ssl_header' config option. Image import " "will be always enabled from this release onwards as designed." msgstr "" "Removed the deprecated 'secure_proxy_ssl_header' config option. Image import " "will be always enabled from this release onwards as designed." msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Rocky development cycle marks long waited milestone on Glance work. The " "Images API v1 which has been deprecated for years is finally removed and not " "available at all in Glance version 17.0.0 forward." msgstr "" "Rocky development cycle marks long waited milestone on Glance work. The " "Images API v1 which has been deprecated for years is finally removed and not " "available at all in Glance version 17.0.0 forward." msgid "" "Sample configuration file shipped with Glance source now has reordered store " "drivers configuration options for future consistent ordering." msgstr "" "Sample configuration file shipped with Glance source now has reordered store " "drivers configuration options for future consistent ordering." 
msgid "Security Issues" msgstr "Security Issues" msgid "" "See the \"Deprecation Notes\" section of this document for more information." msgstr "" "See the \"Deprecation Notes\" section of this document for more information." msgid "" "See the documentation in the sample glance-api.conf file for more " "information." msgstr "" "See the documentation in the sample glance-api.conf file for more " "information." msgid "" "Several `new values`_ were added for the ``vmware_ostype`` property in the " "``OS::Compute::VMware`` namespace." msgstr "" "Several `new values`_ were added for the ``vmware_ostype`` property in the " "``OS::Compute::VMware`` namespace." msgid "" "Since the default value for 'visibility' upon image creation is 'shared', an " "image freshly created using the v1 API can have members added to it, just as " "it did pre-Ocata." msgstr "" "Since the default value for 'visibility' upon image creation is 'shared', an " "image freshly created using the v1 API can have members added to it, just as " "it did pre-Ocata." msgid "" "Some additional points about ``show_multiple_locations`` configuration " "option deprecation." msgstr "" "Some additional points about ``show_multiple_locations`` configuration " "option deprecation." msgid "" "Some backend store names were inconsistent between glance and glance_store. " "This meant that operators of the VMware datastore or file system store were " "required to use store names in ``glance-api.conf`` that did not correspond " "to any valid identifier in glance_store. As this situation encouraged " "misconfiguration and operator unhappiness, we have made the store names " "consistent in the Newton release. What this means for you:" msgstr "" "Some backend store names were inconsistent between glance and glance_store. " "This meant that operators of the VMware datastore or file system store were " "required to use store names in ``glance-api.conf`` that did not correspond " "to any valid identifier in glance_store. As this situation encouraged " "misconfiguration and operator unhappiness, we have made the store names " "consistent in the Newton release. What this means for you:" msgid "" "Some configuration is required in order to make the Interoperable Image " "Import functionality work correctly. In particular, the " "``node_staging_uri`` value in the glance-api.conf file must be set. See the " "section on Interoperable Image Import in the `Glance Administration Guide`_ " "for more information." msgstr "" "Some configuration is required in order to make the Interoperable Image " "Import functionality work correctly. In particular, the " "``node_staging_uri`` value in the glance-api.conf file must be set. See the " "section on Interoperable Image Import in the `Glance Administration Guide`_ " "for more information." msgid "" "Some documentation and test issues have been addressed in this release on " "top of the following bugfixes:" msgstr "" "Some documentation and test issues have been addressed in this release on " "top of the following bugfixes:" msgid "" "Some operators have reported issues with reordering observed in the sample " "configurations shipped with Glance release tarballs. This reordering may " "result into a incorrect \"diff\" of the configurations used downstream vs. " "newly introduced upstream." msgstr "" "Some operators have reported issues with reordering observed in the sample " "configurations shipped with Glance release tarballs. 
This reordering may " "result into a incorrect \"diff\" of the configurations used downstream vs. " "newly introduced upstream." msgid "" "Some security aspects were tackled for this release. Multihash, providing " "secure hashing for image data with future proof options marks the end of " "relying upon MD5 checksums when verifying image payloads. OSSN-0075 " "migitation lessens the risk of ID reusability on those very rare cases when " "a database purge is necessary." msgstr "" "Some security aspects were tackled for this release. Multihash, providing " "secure hashing for image data with future proof options marks the end of " "relying upon MD5 checksums when verifying image payloads. OSSN-0075 " "mitigation lessens the risk of ID re-usability on those very rare cases when " "a database purge is necessary." msgid "" "Some work has been done on Windows compatibility and Glance Stein release is " "compatible running on Windows platform. Future development will be also " "gated on Windows based on 3rd party CI model." msgstr "" "Some work has been done on Windows compatibility and Glance Stein release is " "compatible running on Windows platform. Future development will be also " "gated on Windows based on 3rd party CI model." msgid "" "Stabilization of multi-store feature; from Train onwards multi-store is " "considered stable feature in glance, glance_store and python-glanceclient. " "The community encourages everyone to adopt this new way of configuring " "backend stores at earliest convenience as the old configuration options are " "deprecated for removal to ease the burden of maintaining underlying code. " "Users are able to select the store they want their images to be stored " "during import process." msgstr "" "Stabilization of multi-store feature; from Train onwards multi-store is " "considered stable feature in glance, glance_store and python-glanceclient. " "The community encourages everyone to adopt this new way of configuring " "backend stores at earliest convenience as the old configuration options are " "deprecated for removal to ease the burden of maintaining underlying code. " "Users are able to select the store they want their images to be stored " "during import process." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Stein release cycle has been without major changes to the Images API, this " "release does not introduce new Images API minor version." msgstr "" "Stein release cycle has been without major changes to the Images API, this " "release does not introduce new Images API minor version." msgid "" "Such an image will require its visibility to be updated to 'shared' before " "it will accept members." msgstr "" "Such an image will require its visibility to be updated to 'shared' before " "it will accept members." msgid "" "Support for running Glance in Windows operating systems has been deprecated " "because of retirement of the Winstackers project." msgstr "" "Support for running Glance in Windows operating systems has been deprecated " "because of the retirement of the Winstackers project." msgid "" "The \"multihash\" implemented in this release (`Secure Hash Algorithm " "Support `_) is computed only for new images. There " "is no provision for computing the multihash for existing images. 
Thus, " "users should expect to see JSON 'null' values for the ``os_hash_algo`` and " "``os_hash_value`` image properties on images created prior to the " "installation of the Rocky release at your site." msgstr "" "The \"multihash\" implemented in this release (`Secure Hash Algorithm " "Support `_) is computed only for new images. There " "is no provision for computing the multihash for existing images. Thus, " "users should expect to see JSON 'null' values for the ``os_hash_algo`` and " "``os_hash_value`` image properties on images created prior to the " "installation of the Rocky release at your site." msgid "" "The 'visibility' enumeration has been increased from two values (``public``, " "``private``) to four values (``public``, ``private``, ``shared``, and " "``community``)." msgstr "" "The 'visibility' enumeration has been increased from two values (``public``, " "``private``) to four values (``public``, ``private``, ``shared``, and " "``community``)." msgid "" "The **CURRENT** version of the Images API supplied by Glance is introduced " "as **2.6**. It includes the new API calls introduced on an experimental " "basis in the Pike release." msgstr "" "The **CURRENT** version of the Images API supplied by Glance is introduced " "as **2.6**. It includes the new API calls introduced on an experimental " "basis in the Pike release." msgid "" "The **CURRENT** version of the version 2 Images API supplied by Glance is " "now **2.5**. Changes include:" msgstr "" "The **CURRENT** version of the version 2 Images API supplied by Glance is " "now **2.5**. Changes include:" msgid "" "The *Community Images* feature has been introduced in the Images API v2. " "This enables a user to make an image available for consumption by all other " "users. In association with this change, the 'visibility' values for an " "image have been expanded to include 'community' and 'shared'." msgstr "" "The *Community Images* feature has been introduced in the Images API v2. " "This enables a user to make an image available for consumption by all other " "users. In association with this change, the 'visibility' values for an " "image have been expanded to include 'community' and 'shared'." msgid "The *minor* version of the Images API v2 is bumped to **2.5**." msgstr "The *minor* version of the Images API v2 is bumped to **2.5**." msgid "" "The *minor* version of the Images API v2 is bumped to **2.6** to introduce " "an EXPERIMENTAL version of the API that includes the new calls introduced " "for the Minimal Viable Product delivery of the `refactored image import`_ " "functionality. Version **2.5** remains the CURRENT version of the Images " "API." msgstr "" "The *minor* version of the Images API v2 is bumped to **2.6** to introduce " "an EXPERIMENTAL version of the API that includes the new calls introduced " "for the Minimal Viable Product delivery of the `refactored image import`_ " "functionality. Version **2.5** remains the CURRENT version of the Images " "API." msgid "" "The Artifacts API was an EXPERIMENTAL API that ran on the Glance service " "endpoint as ``/v3`` in the Liberty release. In the Mitaka release, the " "Glance ``/v3`` EXPERIMENTAL API was deprecated and the Artifacts Service ran " "on its own endpoint (completely independent from the Glance service " "endpoint) as an EXPERIMENTAL API, versioned as ``v0.1``. In both the " "Liberty and Mitaka releases, Glare ran on code stored in the Glance code " "repository and used its own tables in the Glance database." 
msgstr "" "The Artifacts API was an EXPERIMENTAL API that ran on the Glance service " "endpoint as ``/v3`` in the Liberty release. In the Mitaka release, the " "Glance ``/v3`` EXPERIMENTAL API was deprecated and the Artifacts Service ran " "on its own endpoint (completely independent from the Glance service " "endpoint) as an EXPERIMENTAL API, versioned as ``v0.1``. In both the " "Liberty and Mitaka releases, Glare ran on code stored in the Glance code " "repository and used its own tables in the Glance database." msgid "" "The Block Storage service *always* creates a new secret in Barbican when it " "uploads a volume as an image, keeping a 1-1 relation between each secret " "stored in the Key Management Service and each image of an encrypted volume " "stored in Glance. Thus, deleting the Barbican secret *at the time when the " "image is deleted* will not cause data loss *as long as the secret is not " "being used for any other purpose*." msgstr "" "The Block Storage service *always* creates a new secret in Barbican when it " "uploads a volume as an image, keeping a 1-1 relation between each secret " "stored in the Key Management Service and each image of an encrypted volume " "stored in Glance. Thus, deleting the Barbican secret *at the time when the " "image is deleted* will not cause data loss *as long as the secret is not " "being used for any other purpose*." msgid "" "The Block Storage service will not use the secret associated with an image " "for any other purpose." msgstr "" "The Block Storage service will not use the secret associated with an image " "for any other purpose." msgid "" "The CURRENT version of the Images API v2 is bumped to **2.6**. The 2.6 API " "was available in the previous (Pike) release as an experimental API to " "introduce the calls necessary for the `interoperable image import " "functionality`_." msgstr "" "The CURRENT version of the Images API v2 is bumped to **2.6**. The 2.6 API " "was available in the previous (Pike) release as an experimental API to " "introduce the calls necessary for the `interoperable image import " "functionality`_." msgid "" "The DEPRECATED Images API v1 does not have a concept of \"visibility\", and " "in a \"pure\" v1 deployment, you would not notice that anything had " "changed. Since, however, we hope that there aren't many of those around " "anymore, here's what you can expect to see if you use the Images API v1 in a " "\"mixed\" deployment." msgstr "" "The DEPRECATED Images API v1 does not have a concept of \"visibility\", and " "in a \"pure\" v1 deployment, you would not notice that anything had " "changed. Since, however, we hope that there aren't many of those around any " "more, here's what you can expect to see if you use the Images API v1 in a " "\"mixed\" deployment." msgid "" "The Database Management sections of the `Glance Administration Guide`_ have " "been revised and updated. This includes information about the current " "experimental status of rolling upgrades and zero-downtime database upgrades." msgstr "" "The Database Management sections of the `Glance Administration Guide`_ have " "been revised and updated. This includes information about the current " "experimental status of rolling upgrades and zero-downtime database upgrades." msgid "" "The Glance API configuration option ``admin_role`` is deprecated in this " "release and is subject to removal at the beginning of the Victoria " "development cycle, following the `OpenStack standard deprecation policy " "`_." 
msgstr "" "The Glance API configuration option ``admin_role`` is deprecated in this " "release and is subject to removal at the beginning of the Victoria " "development cycle, following the `OpenStack standard deprecation policy " "`_." msgid "" "The Glance API configuration option ``admin_role``, having been deprecated " "in the Ussuri release, is now removed. If present in a configuration file, " "it will be silently ignored." msgstr "" "The Glance API configuration option ``admin_role``, having been deprecated " "in the Ussuri release, is now removed. If present in a configuration file, " "it will be silently ignored." msgid "" "The Glance API configuration option ``allow_additional_image_properties`` is " "deprecated in this release and is subject to removal at the beginning of the " "Victoria development cycle, following the `OpenStack standard deprecation " "policy `_." msgstr "" "The Glance API configuration option ``allow_additional_image_properties`` is " "deprecated in this release and is subject to removal at the beginning of the " "Victoria development cycle, following the `OpenStack standard deprecation " "policy `_." msgid "" "The Glance API configuration option ``owner_is_tenant`` is deprecated in " "this release and is subject to removal at the beginning of the 'S' " "development cycle, following the `OpenStack standard deprecation policy " "`_." msgstr "" "The Glance API configuration option ``owner_is_tenant`` is deprecated in " "this release and is subject to removal at the beginning of the 'S' " "development cycle, following the `OpenStack standard deprecation policy " "`_." msgid "" "The Glance API configuration options ``location_strategy`` and " "``store_type_preference`` are deprecated in this release and are subject to " "removal at the beginning of the Dalmatian development cycle, following the " "`OpenStack standard deprecation policy `_." msgstr "" "The Glance API configuration options ``location_strategy`` and " "``store_type_preference`` are deprecated in this release and are subject to " "removal at the beginning of the Dalmatian development cycle, following the " "`OpenStack standard deprecation policy `_." msgid "" "The Glance API configuration options ``metadata_encryption_key`` is " "deprecated in this release and is subject to removal at the beginning of the " "`F` (2025.2) development cycle." msgstr "" "The Glance API configuration options ``metadata_encryption_key`` is " "deprecated in this release and is subject to removal at the beginning of the " "`F` (2025.2) development cycle." msgid "" "The Glance Registry Service and its APIs are officially DEPRECATED in this " "release and are subject to removal at the beginning of the 'S' development " "cycle, following the `OpenStack standard deprecation policy `_." msgstr "" "The Glance Registry Service and its APIs are officially DEPRECATED in this " "release and are subject to removal at the beginning of the 'S' development " "cycle, following the `OpenStack standard deprecation policy `_." msgid "" "The Glance cache driver ``sqlite`` is deprecated in this release and is " "subject to removal at the beginning of the `E` (2025.1) development cycle, " "following the `OpenStack standard deprecation policy `_." msgstr "" "The Glance cache driver ``sqlite`` is deprecated in this release and is " "subject to removal at the beginning of the `E` (2025.1) development cycle, " "following the `OpenStack standard deprecation policy `_." 
msgid "" "The Glance documentation section `Running Glance in HTTPD`_ outlines some " "approaches to use (and not to use) Glance with the Apache httpd server. This " "is the way Glance is configured as a WSGI application in devstack, so it's " "the method with which we've had the most experience. If you try deploying " "Glance using a different web server, please consider contributing your " "findings to the Glance documentation." msgstr "" "The Glance documentation section `Running Glance in HTTPD`_ outlines some " "approaches to use (and not to use) Glance with the Apache HTTPd server. This " "is the way Glance is configured as a WSGI application in devstack, so it's " "the method with which we've had the most experience. If you try deploying " "Glance using a different web server, please consider contributing your " "findings to the Glance documentation." msgid "" "The Glance policies have been modified to drop the system scope. Every API " "policy is scoped to project. This means that system scoped users will get " "403 permission denied error." msgstr "" "The Glance policies have been modified to drop the system scope. Every API " "policy is scoped to the project. This means that system-scoped users will " "get a 403 permission denied error." msgid "" "The Glance project team is committed to the stability of Glance. As part of " "OpenStack, we are committed to `The Four Opens`_. If the ability to run " "Glance under uWSGI is important to you, feel free to participate in the " "Glance community to help coordinate and drive such an effort. (We gently " "remind you that \"participation\" includes providing testing and development " "resources.)" msgstr "" "The Glance project team is committed to the stability of Glance. As part of " "OpenStack, we are committed to `The Four Opens`_. If the ability to run " "Glance under uWSGI is important to you, feel free to participate in the " "Glance community to help coordinate and drive such an effort. (We gently " "remind you that \"participation\" includes providing testing and development " "resources.)" msgid "" "The Glance scrubber, which is invoked by the ``glance-scrubber`` command, is " "deprecated in this release and is subject to removal at the beginning of the " "2024.2 (Dalmatian) development cycle, following the `OpenStack standard " "deprecation policy `_." msgstr "" "The Glance scrubber, which is invoked by the ``glance-scrubber`` command, is " "deprecated in this release and is subject to removal at the beginning of the " "2024.2 (Dalmatian) development cycle, following the `OpenStack standard " "deprecation policy `_." msgid "" "The Glance service enables the API policies (RBAC) new defaults and scope by " "default. The Default value of config options ``[oslo_policy] enforce_scope`` " "and ``[oslo_policy] oslo_policy.enforce_new_defaults`` have been changed to " "``True``." msgstr "" "The Glance service enables the API policies (RBAC) new defaults and scope by " "default. The Default value of config options ``[oslo_policy] enforce_scope`` " "and ``[oslo_policy] oslo_policy.enforce_new_defaults`` have been changed to " "``True``." msgid "" "The Image Service API Reference has been updated with a section on the " "`Interoperable image import`_ process (also known as \"image import " "refactored\") and the API calls that are exposed to implement it in the " "EXPERIMENTAL v2.6 of the API." 
msgstr "" "The Image Service API Reference has been updated with a section on the " "`Interoperable image import`_ process (also known as \"image import " "refactored\") and the API calls that are exposed to implement it in the " "EXPERIMENTAL v2.6 of the API." msgid "" "The Image ``checksum`` property contains an MD5 hash of the image data " "associated with an image. MD5 has not been considered secure for some time, " "and in order to comply with various security standards (for example, FIPS), " "an implementation of the MD5 algorithm may not be available on glance nodes." msgstr "" "The Image ``checksum`` property contains an MD5 hash of the image data " "associated with an image. MD5 has not been considered secure for some time, " "and in order to comply with various security standards (for example, FIPS), " "an implementation of the MD5 algorithm may not be available on Glance nodes." msgid "" "The Image service API call ``DELETE /v2/cache/{image_id}`` and ``DELETE /v2/" "cache`` now returns a 204 (No Content) response code to indicate success. In " "glance 24.0.0 (the initial Yoga release), it had mistakenly returned a 200." msgstr "" "The Image service API call ``DELETE /v2/cache/{image_id}`` and ``DELETE /v2/" "cache`` now returns a 204 (No Content) response code to indicate success. In " "Glance 24.0.0 (the initial Yoga release), it had mistakenly returned a 200." msgid "" "The Image service API call ``PUT /v2/cache/{image_id}`` now returns a 202 " "(Accepted) response code to indicate success. In glance 24.0.0 (the initial " "Yoga release), it had mistakenly returned a 200." msgstr "" "The Image service API call ``PUT /v2/cache/{image_id}`` now returns a 202 " "(Accepted) response code to indicate success. In glance 24.0.0 (the initial " "Yoga release), it had mistakenly returned a 200." msgid "" "The Images (Glance) version 1 API has been DEPRECATED. Please see " "deprecations section for more information." msgstr "" "The Images (Glance) version 1 API has been DEPRECATED. Please see " "deprecations section for more information." msgid "" "The Interoperable Image Import section of the `Image Service API v2 " "Reference Guide`_ was updated to include the new ``web-download`` import " "method." msgstr "" "The Interoperable Image Import section of the `Image Service API v2 " "Reference Guide`_ was updated to include the new ``web-download`` import " "method." msgid "" "The Pike release notes pointed out that although support had been added to " "run Glance as a WSGI application hosted by a web server, the Glance team " "recommended that Glance be run in its normal standalone configuration, " "particularly in production environments." msgstr "" "The Pike release notes pointed out that although support had been added to " "run Glance as a WSGI application hosted by a web server, the Glance team " "recommended that Glance be run in its normal standalone configuration, " "particularly in production environments." msgid "" "The Victoria release includes some important milestones in Glance " "development priorities." msgstr "" "The Victoria release includes some important milestones in Glance " "development priorities." msgid "" "The Xena release includes some important milestones in Glance development " "priorities." msgstr "" "The Xena release includes some important milestones in Glance development " "priorities." msgid "" "The Xena release of Glance is a midpoint in the process of refactoring how " "our policies are applied to API operations. 
The goal of applying policy " "enforcement in the API will ultimately increase the flexibility operators " "have over which users can do what operations to which images, and provides a " "path for compliant Secure RBAC and scoped tokens. In Xena, some policies are " "more flexible than they once were, allowing for more fine-grained assignment " "of responsibilities, but not all things are possible yet. If " "`enforce_secure_rbac` is not enabled, most things are still enforcing the " "legacy behavior of hard and fast admin-or-owner requirements." msgstr "" "The Xena release of Glance is a midpoint in the process of refactoring how " "our policies are applied to API operations. The goal of applying policy " "enforcement in the API will ultimately increase the flexibility operators " "have over which users can do what operations to which images, and provides a " "path for compliant Secure RBAC and scoped tokens. In Xena, some policies are " "more flexible than they once were, allowing for more fine-grained assignment " "of responsibilities, but not all things are possible yet. If " "`enforce_secure_rbac` is not enabled, most things are still enforcing the " "legacy behaviour of hard and fast admin-or-owner requirements." msgid "" "The Yoga release includes some important milestones in Glance development " "priorities." msgstr "" "The Yoga release includes some important milestones in Glance development " "priorities." msgid "" "The Zed release includes some important milestones in Glance development " "priorities. * Extended the functionality of stores-detail API * Added glance-" "download internal plugin to download the image from remote glance * Added " "support for immediate caching of an image * Removed dead code of auth and " "policy layers" msgstr "" "The Zed release includes some important milestones in Glance development " "priorities:\n" "\n" "* Extended the functionality of the stores-detail API\n" "* Added the glance-download internal plugin to download the image from a " "remote glance\n" "* Added support for immediate caching of an image\n" "* Removed dead code of the auth and policy layers" msgid "" "The `Multi-Store Backend Support `_ feature is " "introduced on an experimental basis in the EXPERIMENTAL Image Service API " "version 2.8:" msgstr "" "The `Multi-Store Backend Support `_ feature is " "introduced on an experimental basis in the EXPERIMENTAL Image Service API " "version 2.8:" msgid "" "The `Tasks API`_ was made admin-only by default in Mitaka by restricting the " "following policy targets to **role:admin**: **get_task**, **get_tasks**, " "**add_task**, and **modify_task**." msgstr "" "The `Tasks API`_ was made admin-only by default in Mitaka by restricting the " "following policy targets to **role:admin**: **get_task**, **get_tasks**, " "**add_task**, and **modify_task**." msgid "" "The ``allow_additional_image_properties`` configuration option, which was " "deprecated in Ussuri, has been removed in this release." msgstr "" "The ``allow_additional_image_properties`` configuration option, which was " "deprecated in Ussuri, has been removed in this release." msgid "" "The ``compressed`` container format was added in support of the Cinder " "(Block Storage Service) feature `Leverage compression accelerator `_. You may expect that Cinder will be able to consume any " "image in ``compressed`` container format *that Cinder has created*. You " "should not expect, however, for other services to be able to consume such an " "image at the present time. 
Further, you should not expect Cinder to be able " "to successfully use an image in ``compressed`` format that it has not " "created itself." msgstr "" "The ``compressed`` container format was added in support of the Cinder " "(Block Storage Service) feature `Leverage compression accelerator `_. You may expect that Cinder will be able to consume any " "image in ``compressed`` container format *that Cinder has created*. You " "should not expect, however, for other services to be able to consume such an " "image at the present time. Further, you should not expect Cinder to be able " "to successfully use an image in ``compressed`` format that it has not " "created itself." msgid "" "The ``cursive`` library is an OpenStack project which implements OpenStack-" "specific verification of digital signatures." msgstr "" "The ``cursive`` library is an OpenStack project which implements OpenStack-" "specific verification of digital signatures." msgid "" "The ``db_downgrade`` command has been removed from the ``glance-manage`` " "utility and all database downgrade scripts have been removed. In accord " "with OpenStack policy, Glance cannot be downgraded any more. Operators are " "advised to make a full database backup of their production data before " "attempting any upgrade." msgstr "" "The ``db_downgrade`` command has been removed from the ``glance-manage`` " "utility and all database downgrade scripts have been removed. In accord " "with OpenStack policy, Glance cannot be downgraded any more. Operators are " "advised to make a full database backup of their production data before " "attempting any upgrade." msgid "" "The ``default`` policy in ``policy.json`` now uses the admin role rather " "than any role. This is to make the policy file restrictive rather than " "permissive and tighten security." msgstr "" "The ``default`` policy in ``policy.json`` now uses the admin role rather " "than any role. This is to make the policy file restrictive rather than " "permissive and tighten security." msgid "" "The ``digest_algorithm`` configuration option has been deprecated in this " "release and is subject to removal at the beginning of the F development " "cycle, following the `OpenStack standard deprecation policy `_." msgstr "" "The ``digest_algorithm`` configuration option has been deprecated in this " "release and is subject to removal at the beginning of the F development " "cycle, following the `OpenStack standard deprecation policy `_." msgid "" "The ``disk_format`` config option enables ``ploop`` as supported by default." msgstr "" "The ``disk_format`` config option enables ``ploop`` as supported by default." msgid "" "The ``disk_format`` config option enables ``vhdx`` as supported by default." msgstr "" "The ``disk_format`` config option enables ``vhdx`` as supported by default." msgid "" "The ``enable_image_import`` configuration option was introduced as " "DEPRECATED in Pike and will be removed in Rocky." msgstr "" "The ``enable_image_import`` configuration option was introduced as " "DEPRECATED in Pike and will be removed in Rocky." msgid "" "The ``glance-cache-manage`` command is deprecated in this release in favor " "of the new Cache API. It is subject to removal at the beginning of the " "Dalmatian development cycle, following the `OpenStack standard deprecation " "policy `_." msgstr "" "The ``glance-cache-manage`` command is deprecated in this release in favor " "of the new Cache API. 
It is subject to removal at the beginning of the " "Dalmatian development cycle, following the `OpenStack standard deprecation " "policy `_." msgid "" "The ``glance-manage`` tool has been updated to address `OSSN-0075`_. Please " "see the `Database Maintenance`_ section of the Glance Administration Guide " "for details." msgstr "" "The ``glance-manage`` tool has been updated to address `OSSN-0075`_. Please " "see the `Database Maintenance`_ section of the Glance Administration Guide " "for details." msgid "" "The ``glance-replicator`` options ``mastertoken`` and ``slavetoken`` were " "deprecated in the Pike release cycle. These options have now been removed. " "The options ``sourcetoken`` and ``targettoken`` should be used instead." msgstr "" "The ``glance-replicator`` options ``mastertoken`` and ``slavetoken`` were " "deprecated in the Pike release cycle. These options have now been removed. " "The options ``sourcetoken`` and ``targettoken`` should be used instead." msgid "" "The ``inject_image_metadata`` task will no longer allow setting properties " "in the reserved ``os_glance_*`` namespace, in line with the blanket " "prohibition on such via the API. It has always been dangerous to do this, so " "no operator should have any such configuration in production. If any keys in " "this namespace are set, they will be dropped (and logged) during the " "injection process." msgstr "" "The ``inject_image_metadata`` task will no longer allow setting properties " "in the reserved ``os_glance_*`` namespace, in line with the blanket " "prohibition on such via the API. It has always been dangerous to do this, so " "no operator should have any such configuration in production. If any keys in " "this namespace are set, they will be dropped (and logged) during the " "injection process." msgid "" "The ``location_strategy`` functionality which was deprecated in " "Bobcat(2023.2), has been removed in this release." msgstr "" "The ``location_strategy`` functionality, which was deprecated in " "Bobcat (2023.2), has been removed in this release." msgid "" "The ``os_hash_value`` image property, introduced as part of the `Secure Hash " "Algorithm Support `_ (\"multihash\") feature, is limited " "to 128 characters. This is sufficient to store 512 bits as a hexadecimal " "numeral." msgstr "" "The ``os_hash_value`` image property, introduced as part of the `Secure Hash " "Algorithm Support `_ (\"multihash\") feature, is limited " "to 128 characters. This is sufficient to store 512 bits as a hexadecimal " "numeral." msgid "" "The ``owner_is_tenant`` configuration option, which was deprecated in Rocky, " "has been removed in this release. As announced in the spec `Deprecate " "owner_is_tenant `_, given " "that an operator survey indicated that this option was only used in its " "default value of ``True``, no database migration is included in this release." msgstr "" "The ``owner_is_tenant`` configuration option, which was deprecated in Rocky, " "has been removed in this release. As announced in the spec `Deprecate " "owner_is_tenant `_, given " "that an operator survey indicated that this option was only used in its " "default value of ``True``, no database migration is included in this release." msgid "The ``s3`` store driver has been removed." msgstr "The ``s3`` store driver has been removed." msgid "" "The ``sheepdog`` storage backend driver was deprecated in the Train release " "and has now been removed. 
Any deployments still using Sheepdog storage will " "need to migrate to a different backend storage prior to upgrading to this " "release." msgstr "" "The ``sheepdog`` storage backend driver was deprecated in the Train release " "and has now been removed. Any deployments still using Sheepdog storage will " "need to migrate to a different backend storage prior to upgrading to this " "release." msgid "" "The ``show_multiple_locations`` configuration option remains DEPRECATED but " "not removed in the Train release. We continue to recommend that image " "locations not be exposed to end users. See `OSSN-0065 `_ for more information." msgstr "" "The ``show_multiple_locations`` configuration option remains DEPRECATED but " "not removed in the Train release. We continue to recommend that image " "locations not be exposed to end users. See `OSSN-0065 `_ for more information." msgid "" "The ``show_multiple_locations`` configuration option remains DEPRECATED but " "not removed in the Ussuri release. We continue to recommend that image " "locations not be exposed to end users. See `OSSN-0065 `_ for more information." msgstr "" "The ``show_multiple_locations`` configuration option remains DEPRECATED but " "not removed in the Ussuri release. We continue to recommend that image " "locations not be exposed to end users. See `OSSN-0065 `_ for more information." msgid "" "The ``show_multiple_locations`` configuration option remains deprecated in " "this release, but it has not been removed. (It had been scheduled for " "removal in the Pike release.) Please keep a watch on the Glance release " "notes and the glance-specs repository to stay informed about developments on " "this issue." msgstr "" "The ``show_multiple_locations`` configuration option remains deprecated in " "this release, but it has not been removed. (It had been scheduled for " "removal in the Pike release.) Please keep a watch on the Glance release " "notes and the glance-specs repository to stay informed about developments on " "this issue." msgid "" "The ``web-download`` import-method, intended to be a replacement for the " "popular Image Service API v1 \"copy-from\" functionality, is configurable so " "that you can avoid the vulnerabilty described in `OSSN-0078`_. See the " "Interoperable Image Import section of the `Glance Administration Guide`_ for " "details." msgstr "" "The ``web-download`` import-method, intended to be a replacement for the " "popular Image Service API v1 \"copy-from\" functionality, is configurable so " "that you can avoid the vulnerability described in `OSSN-0078`_. See the " "Interoperable Image Import section of the `Glance Administration Guide`_ for " "details." msgid "" "The `documentation was reorganized`_ in accord with the new standard layout " "for OpenStack projects." msgstr "" "The `documentation was reorganised`_ in accord with the new standard layout " "for OpenStack projects." msgid "" "The `metadata_encryption_key` and it's related functioanlity don't serve the " "purpose of encryption of location metadata, whereas it encrypts location url " "only for specific APIs. Also if enabled this during an upgrade, may disrupt " "existing deployments, as it does not support/provide db upgrade script to " "encrypt existing location URLs. Moreover, its functionality for encrypting " "location URLs is inconsistent which resulting in download failures." 
msgstr "" "The `metadata_encryption_key` and it's related functioanlity don't serve the " "purpose of encryption of location metadata, whereas it encrypts location url " "only for specific APIs. Also if enabled this during an upgrade, may disrupt " "existing deployments, as it does not support/provide db upgrade script to " "encrypt existing location URLs. Moreover, its functionality for encrypting " "location URLs is inconsistent which resulting in download failures." msgid "" "The ability to update an image to have 'community' visibility is governed by " "a policy target named 'communitize_image'. The default is empty, that is, " "any user may communitize an image." msgstr "" "The ability to update an image to have 'community' visibility is governed by " "a policy target named 'communitize_image'. The default is empty, that is, " "any user may communitise an image." msgid "" "The change in migration engine has been undertaken in order to enable zero-" "downtime database upgrades, which are part of the effort to implement " "rolling upgrades for Glance (scheduled for the Pike release)." msgstr "" "The change in migration engine has been undertaken in order to enable zero-" "downtime database upgrades, which are part of the effort to implement " "rolling upgrades for Glance (scheduled for the Pike release)." msgid "" "The cinder store lazy migration code assumed that the user performing the " "GET was authorized to modify the image in order to perform the update. This " "will not be the case for shared or public images where the user is not the " "owner or an admin, and would result in a 404 to the user if a migration is " "needed but not completed. Now, we delay the migration if we are not " "sufficiently authorized, allowing the first GET by the owner (or an admin) " "to perform it. See Bug 1932337_ for more information." msgstr "" "The Cinder store lazy migration code assumed that the user performing the " "GET was authorised to modify the image in order to perform the update. This " "will not be the case for shared or public images where the user is not the " "owner or an admin, and would result in a 404 to the user if a migration is " "needed but not completed. Now, we delay the migration if we are not " "sufficiently authorised, allowing the first GET by the owner (or an admin) " "to perform it. See Bug 1932337_ for more information." msgid "" "The configuration option ``image_cache_sqlite_db`` related to ``sqlite`` " "cache driver is also deprecated and is subject to removal at the beginning " "of `E` (2025.1) development cycle." msgstr "" "The configuration option ``image_cache_sqlite_db`` related to ``sqlite`` " "cache driver is also deprecated and is subject to removal at the beginning " "of `E` (2025.1) development cycle." msgid "" "The configuration options ``work_dir`` and ``node_staging_uri`` are " "deprecated and will be removed early in the 'U' development cycle." msgstr "" "The configuration options ``work_dir`` and ``node_staging_uri`` are " "deprecated and will be removed early in the 'U' development cycle." msgid "" "The database migration engine used by Glance for database upgrades has been " "changed from *SQLAlchemy Migrate* to *Alembic* in this release." msgstr "" "The database migration engine used by Glance for database upgrades has been " "changed from *SQLAlchemy Migrate* to *Alembic* in this release." 
msgid "" "The database migration engine used by Glance for database upgrades was " "changed from *SQLAlchemy Migrate* to *Alembic* in the 14.0.0 (Ocata) " "release. Support for *SQLAlchemy Migrate* has now been removed. This means " "in order to upgrade from a pre-Ocata release to Xena or later, you must " "upgrade to Wallaby or earlier first." msgstr "" "The database migration engine used by Glance for database upgrades was " "changed from *SQLAlchemy Migrate* to *Alembic* in the 14.0.0 (Ocata) " "release. Support for *SQLAlchemy Migrate* has now been removed. This means " "in order to upgrade from a pre-Ocata release to Xena or later, you must " "upgrade to Wallaby or earlier first." msgid "" "The default policy for the `metadef` API has changed from \"open to everyone" "\" to \"only admins can create and modify resources\". We believe that this " "is by far the most common use-case and the only sane default. See Bug " "1916926_ for more details." msgstr "" "The default policy for the `metadef` API has changed from \"open to everyone" "\" to \"only admins can create and modify resources\". We believe that this " "is by far the most common use-case and the only sane default. See Bug " "1916926_ for more details." msgid "" "The default secure hash algorithm is SHA-512. It should be suitable for " "most applications." msgstr "" "The default secure hash algorithm is SHA-512. It should be suitable for " "most applications." msgid "" "The default value for the API configuration option ``workers`` was " "previously the number of CPUs available. It has been changed to be the min " "of {number of CPUs, 8}. Any value set for that option, of course, is " "honored. See Bug 1748916_ for details." msgstr "" "The default value for the API configuration option ``workers`` was " "previously the number of CPUs available. It has been changed to be the min " "of {number of CPUs, 8}. Any value set for that option, of course, is " "honoured. See Bug 1748916_ for details." msgid "" "The default value of 'shared' may seem weird, but it preserves the pre-" "upgrade workflow of: (1) create an image with default visibility, (2) add " "members to that image. Further, an image with a visibility of 'shared' that " "has no members is not accessible to other users, so it is functionally a " "private image." msgstr "" "The default value of 'shared' may seem weird, but it preserves the pre-" "upgrade workflow of: (1) create an image with default visibility, (2) add " "members to that image. Further, an image with a visibility of 'shared' that " "has no members is not accessible to other users, so it is functionally a " "private image." msgid "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customized or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in backward compatible way." msgstr "" "The default value of ``[oslo_policy] policy_file`` config option has been " "changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing " "customised or previously generated static policy JSON files (which are not " "needed by default), should generate new policy files or convert them in YAML " "format. 
Use the `oslopolicy-convert-json-to-yaml `_ tool to " "convert a JSON to YAML formatted policy file in a backward compatible way." msgid "" "The default value of the Glance API configuration option ``admin_role`` has " "been changed in this release. If you were also using the default policy " "configuration, this change will not affect you. If you were *not* using the " "default policy configuration, please read on." msgstr "" "The default value of the Glance API configuration option ``admin_role`` has " "been changed in this release. If you were also using the default policy " "configuration, this change will not affect you. If you were *not* using the " "default policy configuration, please read on." msgid "The deprecated 'enable_v2_api' config option has been removed." msgstr "The deprecated 'enable_v2_api' config option has been removed." msgid "" "The deprecation path for the configuration option " "``show_multiple_locations`` has been changed because the mitigation " "instructions for `OSSN-0065`_ refer to this option. It is now subject to " "removal on or after the **Pike** release. The help text for this option has " "been updated accordingly." msgstr "" "The deprecation path for the configuration option " "``show_multiple_locations`` has been changed because the mitigation " "instructions for `OSSN-0065`_ refer to this option. It is now subject to " "removal on or after the **Pike** release. The help text for this option has " "been updated accordingly." msgid "" "The discovery calls defined in the `refactored image import spec`_ remain in " "an abbreviated form in this release." msgstr "" "The discovery calls defined in the `refactored image import spec`_ remain in " "an abbreviated form in this release." msgid "" "The exact format of the compressed file is unspecified. It is the " "responsibility of the consuming service to analyze the data payload and " "determine the compression format. A particular OpenStack service may only " "support specific formats. Thus, even if a service does support the " "``compressed`` container format, this does not imply that the service can " "handle arbitrary compression formats. Consult the documentation for the " "service that will consume your image for details." msgstr "" "The exact format of the compressed file is unspecified. It is the " "responsibility of the consuming service to analyse the data payload and " "determine the compression format. A particular OpenStack service may only " "support specific formats. Thus, even if a service does support the " "``compressed`` container format, this does not imply that the service can " "handle arbitrary compression formats. Consult the documentation for the " "service that will consume your image for details." msgid "" "The following are some highlights of the bug fixes included in this release." msgstr "" "The following are some highlights of the bug fixes included in this release." 
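# NOTE (editor, illustrative sketch only; not an extracted string): the
# policy.json-to-policy.yaml note above refers to the oslo.policy
# conversion tool. A typical invocation, assuming default glance file
# locations, would look like:
#
#   oslopolicy-convert-json-to-yaml --namespace glance \
#     --policy-file /etc/glance/policy.json \
#     --output-file /etc/glance/policy.yaml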
msgid "" "The following metadata definitions have been modified in the Pike release:" msgstr "" "The following metadata definitions have been modified in the Pike release:" msgid "" "The following metadata definitions have been modified in the Queens release:" msgstr "" "The following metadata definitions have been modified in the Queens release:" msgid "" "The following metadata definitions have been modified in the Rocky release:" msgstr "" "The following metadata definitions have been modified in the Rocky release:" msgid "" "The following metadata definitions have been modified in the Train release:" msgstr "" "The following metadata definitions have been modified in the Train release:" msgid "The following metadata definitions have been modified:" msgstr "The following metadata definitions have been modified:" msgid "" "The glance configuration options have been improved with detailed help " "texts, defaults for sample configuration files, explicit choices of values " "for operators to choose from, and a strict range defined with ``min`` and " "``max`` boundaries." msgstr "" "The glance configuration options have been improved with detailed help " "texts, defaults for sample configuration files, explicit choices of values " "for operators to choose from, and a strict range defined with ``min`` and " "``max`` boundaries." msgid "" "The glance-api service no longer attempts to load ``api-paste.ini`` file as " "its service config file. All config options should be written in service " "config files such as ``glance-api.conf``." msgstr "" "The glance-api service no longer attempts to load ``api-paste.ini`` file as " "its service config file. All config options should be written in service " "config files such as ``glance-api.conf``." msgid "" "The glance-scrubber utility is now multistore aware. If you are using the " "multistore feature, you must define configuration options for " "``os_glance_tasks_store`` and ``os_glance_staging_store`` in the ``glance-" "scrubber.conf`` file. See the \"Reserved Stores\" section of the \"Multi " "Store Support\" chapter of the Glance Administration Guide for more " "information." msgstr "" "The glance-scrubber utility is now multistore aware. If you are using the " "multistore feature, you must define configuration options for " "``os_glance_tasks_store`` and ``os_glance_staging_store`` in the ``glance-" "scrubber.conf`` file. See the \"Reserved Stores\" section of the \"Multi " "Store Support\" chapter of the Glance Administration Guide for more " "information." msgid "" "The identifier ``compressed`` has been added to the list of supported " "container formats. The intent is that this format identifier will be used " "for any compressed file archive format (for example, gzip or rar) that is " "not otherwise covered by the existing container format identifiers." msgstr "" "The identifier ``compressed`` has been added to the list of supported " "container formats. The intent is that this format identifier will be used " "for any compressed file archive format (for example, gzip or rar) that is " "not otherwise covered by the existing container format identifiers." msgid "" "The identifier ``ploop`` has been added to the list of supported disk " "formats in Glance. The respective configuration option has been updated and " "the default list shows ``ploop`` as a supported format." msgstr "" "The identifier ``ploop`` has been added to the list of supported disk " "formats in Glance. 
The respective configuration option has been updated and " "the default list shows ``ploop`` as a supported format." msgid "" "The identifier ``vhdx`` has been added to the list of supported disk formats " "in Glance. The respective configuration option has been updated and the " "default list shows ``vhdx`` as a supported format." msgstr "" "The identifier ``vhdx`` has been added to the list of supported disk formats " "in Glance. The respective configuration option has been updated and the " "default list shows ``vhdx`` as a supported format." msgid "" "The image signature verification feature has been updated to follow the " "\"sign-the-data\" approach, which uses a signature of the image data " "directly. The prior deprecated \"sign-the-hash\" approach, which uses a " "signature of an MD5 hash of the image data, has been removed." msgstr "" "The image signature verification feature has been updated to follow the " "\"sign-the-data\" approach, which uses a signature of the image data " "directly. The prior deprecated \"sign-the-hash\" approach, which uses a " "signature of an MD5 hash of the image data, has been removed." msgid "" "The image-create operation allows a visibility to be set at the time of " "image creation. This option was probably not used much given that " "previously there were only two visibility values available, one of which " "('public') is by default unassignable by end users. Operators may wish to " "update their documentation or tooling to specify a visibility value when end " "users create images. To summarize:" msgstr "" "The image-create operation allows a visibility to be set at the time of " "image creation. This option was probably not used much given that " "previously there were only two visibility values available, one of which " "('public') is by default unassignable by end users. Operators may wish to " "update their documentation or tooling to specify a visibility value when end " "users create images. To summarise:" msgid "" "The image-list call to the Images v2 API now recognizes a ``protected`` " "query-string parameter. This parameter accepts only two values: either " "``true`` or ``false``. The filter is case-sensitive. Any other value will " "result in a 400 response to the request. See the `protected filter " "specification`_ document for details." msgstr "" "The image-list call to the Images v2 API now recognizes a ``protected`` " "query-string parameter. This parameter accepts only two values: either " "``true`` or ``false``. The filter is case-sensitive. Any other value will " "result in a 400 response to the request. See the `protected filter " "specification`_ document for details." msgid "" "The initial implementation of the image signature verification feature in " "Glance was insecure, because it relied on an MD5 hash of the image data. " "More details can be found in bug 1516031. This \"sign-the-hash\" approach " "was deprecated in Mitaka, and has been removed in Newton. Related " "CVE-2015-8234." msgstr "" "The initial implementation of the image signature verification feature in " "Glance was insecure, because it relied on an MD5 hash of the image data. " "More details can be found in bug 1516031. This \"sign-the-hash\" approach " "was deprecated in Mitaka, and has been removed in Newton. Related " "CVE-2015-8234." 
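# NOTE (editor, illustrative sketch only; not an extracted string): the
# ``protected`` image-list filter described above accepts exactly ``true``
# or ``false`` and is case-sensitive, so:
#
#   GET /v2/images?protected=true    -> 200, lists only protected images
#   GET /v2/images?protected=false   -> 200, lists only unprotected images
#   GET /v2/images?protected=True    -> 400, value must be lower-case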
msgid "" "The intent of the ``compressed`` container format identifier introduced in " "this release is that it will be used for any compressed file archive format " "(for example, gzip or rar) that is not otherwise covered by the existing " "container format identifiers." msgstr "" "The intent of the ``compressed`` container format identifier introduced in " "this release is that it will be used for any compressed file archive format " "(for example, gzip or rar) that is not otherwise covered by the existing " "container format identifiers." msgid "" "The internal store identifiers introduced in this release are " "``os_glance_tasks_store`` and ``os_glance_staging_store``." msgstr "" "The internal store identifiers introduced in this release are " "``os_glance_tasks_store`` and ``os_glance_staging_store``." msgid "" "The interoperable image import functionality uses the Glance tasks engine. " "This is transparent to end users, as they do *not* use the Tasks API for the " "interoperable image import workflow. The operator, however, must make sure " "that the following configuration options are set correctly." msgstr "" "The interoperable image import functionality uses the Glance tasks engine. " "This is transparent to end users, as they do *not* use the Tasks API for the " "interoperable image import workflow. The operator, however, must make sure " "that the following configuration options are set correctly." msgid "" "The introduction of the ``compressed`` container format in this release " "gives us the opportunity to remind you that Glance does not verify that the " "``container_format`` image property is accurate for *any* container format. " "It is the responsibility of the image consumer to verify the image data " "payload format and take appropriate action in the case of a misdescribed " "image." msgstr "" "The introduction of the ``compressed`` container format in this release " "gives us the opportunity to remind you that Glance does not verify that the " "``container_format`` image property is accurate for *any* container format. " "It is the responsibility of the image consumer to verify the image data " "payload format and take appropriate action in the case of a misdescribed " "image." msgid "" "The latest release of glance_store library does not have the support for the " "``s3`` driver. All code references of the same have been removed from the " "library. As this release of Glance uses the updated glance_store library, " "you will find the ``s3`` driver support removed from Glance too. For example " "the Glance image location strategy modules no longer offer the ``s3`` driver " "support." msgstr "" "The latest release of glance_store library does not have the support for the " "``s3`` driver. All code references of the same have been removed from the " "library. As this release of Glance uses the updated glance_store library, " "you will find the ``s3`` driver support removed from Glance too. For example " "the Glance image location strategy modules no longer offer the ``s3`` driver " "support." msgid "" "The legacy 'checksum' image property, which provides an MD5 message digest " "of the image data, is preserved for backward compatibility." msgstr "" "The legacy 'checksum' image property, which provides an MD5 message digest " "of the image data, is preserved for backward compatibility." msgid "" "The lock_path config option from oslo.concurrency is now required for using " "the sql image_cache driver. 
If one is not specified it will default to the " "image_cache_dir and emit a warning." msgstr "" "The lock_path config option from oslo.concurrency is now required for using " "the SQL image_cache driver. If one is not specified it will default to the " "image_cache_dir and emit a warning." msgid "" "The metadata definition for ``hypervisor_type`` in the ``OS::Compute::" "Hypervisor`` namespace has been extended to include the Virtuozzo " "hypervisor, designated as ``vz``. You may upgrade the definition using:" msgstr "" "The metadata definition for ``hypervisor_type`` in the ``OS::Compute::" "Hypervisor`` namespace has been extended to include the Virtuozzo " "hypervisor, designated as ``vz``. You may upgrade the definition using:" msgid "" "The metadefs schemas for 'property', 'properties', 'tag', 'tags', 'object', " "and 'objects' previously specified a 'name' element of maximum 255 " "characters. Any attempt to add a name of greater than 80 characters in " "length, however, resulted in a 500 response. The schemas have been corrected " "to specify a maximum length of 80 characters for the 'name' field." msgstr "" "The metadefs schemas for 'property', 'properties', 'tag', 'tags', 'object', " "and 'objects' previously specified a 'name' element of maximum 255 " "characters. Any attempt to add a name of greater than 80 characters in " "length, however, resulted in a 500 response. The schemas have been corrected " "to specify a maximum length of 80 characters for the 'name' field." msgid "" "The migration of image visibility assigns sensible values to images, namely, " "'private' to images that end users have *not* assigned members, and 'shared' " "to those images that have members at the time of the upgrade. Previously, " "if an end user wanted to share a private image, a member could be added " "directly. After the upgrade, the image will have to have its visibility " "changed to 'shared' before a member can be assigned." msgstr "" "The migration of image visibility assigns sensible values to images, namely, " "'private' to images that end users have *not* assigned members, and 'shared' " "to those images that have members at the time of the upgrade. Previously, " "if an end user wanted to share a private image, a member could be added " "directly. After the upgrade, the image will have to have its visibility " "changed to 'shared' before a member can be assigned." msgid "" "The migration path for operators who were using this option in its " "nondefault ``False`` setting is to set the ``image_property_quota`` option " "to ``0``. Since many other OpenStack services depend upon the ability to " "read/write custom image properties, however, we suspect that no one has been " "using the option with a nondefault value." msgstr "" "The migration path for operators who were using this option in its " "nondefault ``False`` setting is to set the ``image_property_quota`` option " "to ``0``. Since many other OpenStack services depend upon the ability to " "read/write custom image properties, however, we suspect that no one has been " "using the option with a nondefault value." msgid "" "The new ``tasks_api_access`` policy target directly controls access to the " "Tasks API, whereas targets just mentioned indirectly affect what can be " "manipulated via the API by controlling what operations can be performed on " "Glance's internal task objects. 
The key point is that if you want to expose " "the new interoperable image import process to end users while keeping the " "Tasks API admin-only, you can accomplish this by using the following " "settings:" msgstr "" "The new ``tasks_api_access`` policy target directly controls access to the " "Tasks API, whereas targets just mentioned indirectly affect what can be " "manipulated via the API by controlling what operations can be performed on " "Glance's internal task objects. The key point is that if you want to expose " "the new interoperable image import process to end users while keeping the " "Tasks API admin-only, you can accomplish this by using the following " "settings:" msgid "" "The plan continues to be to eliminate the option and use only policies to " "control image locations access. This, however, requires some major " "refactoring, as discussed in the `draft Policy Refactor spec `_. Further, there is no projected timeline for " "this change, as no one has been able to commit time to it. (The Glance team " "would be happy to discuss this more with anyone interested in working on it.)" msgstr "" "The plan continues to be to eliminate the option and use only policies to " "control image locations access. This, however, requires some major " "refactoring, as discussed in the `draft Policy Refactor spec `_. Further, there is no projected timeline for " "this change, as no one has been able to commit time to it. (The Glance team " "would be happy to discuss this more with anyone interested in working on it.)" msgid "" "The plan is to eliminate the option and use only policies to control image " "locations access. This, however, requires some major refactoring. See the " "`draft Policy Refactor spec `_ for " "more information." msgstr "" "The plan is to eliminate the option and use only policies to control image " "locations access. This, however, requires some major refactoring. See the " "`draft Policy Refactor spec `_ for " "more information." msgid "" "The policies protecting the image API have been deprecated in favor of more " "consistent defaults that use the `member` and `reader` default roles from " "keystone. If your deployment relies on overriding the default policies, " "please review the new defaults and how they may impact your deployment." msgstr "" "The policies protecting the image API have been deprecated in favour of more " "consistent defaults that use the `member` and `reader` default roles from " "Keystone. If your deployment relies on overriding the default policies, " "please review the new defaults and how they may impact your deployment." msgid "" "The policy check that we ran when an image is not found is removed. This " "previously allowed an operator to override the behavior of a 404 to be a " "403, in contrast to the API documentation and design goals of defaulting to " "404 for information-hiding reasons. This check is no longer run in the case " "of a NotFound result from the database, so any policy attempting to control " "that behavior will be ignored from now on." msgstr "" "The policy check that we ran when an image is not found is removed. This " "previously allowed an operator to override the behavior of a 404 to be a " "403, in contrast to the API documentation and design goals of defaulting to " "404 for information-hiding reasons. This check is no longer run in the case " "of a NotFound result from the database, so any policy attempting to control " "that behaviour will be ignored from now on." 
msgid "" "The properties ``cinder_encryption_key_id`` and " "``cinder_encryption_key_deletion_policy`` have been added to the *common " "image properties* and appear in the image schema. See the \"New Features\" " "section of these notes for information about these image properties." msgstr "" "The properties ``cinder_encryption_key_id`` and " "``cinder_encryption_key_deletion_policy`` have been added to the *common " "image properties* and appear in the image schema. See the \"New Features\" " "section of these notes for information about these image properties." msgid "" "The property ``img_hide_hypervisor_id`` has been added to the namespace " "``OS::Compute::LibvirtImage``." msgstr "" "The property ``img_hide_hypervisor_id`` has been added to the namespace " "``OS::Compute::LibvirtImage``." msgid "" "The property img_linked_clone_ has been added to the namespace ``OS::" "Compute::VMware``." msgstr "" "The property img_linked_clone_ has been added to the namespace ``OS::" "Compute::VMware``." msgid "" "The sample configuration files autogenerated using the oslo-config-generator " "tool now give consistent ordering of the store drivers configurations." msgstr "" "The sample configuration files autogenerated using the oslo-config-generator " "tool now give consistent ordering of the store drivers configurations." msgid "" "The section on Interoperable Image Import in the `Glance Administration " "Guide`_ has been updated. Please see that section of the Guide for " "information about the configuration required to make the import " "functionality work correctly." msgstr "" "The section on Interoperable Image Import in the `Glance Administration " "Guide`_ has been updated. Please see that section of the Guide for " "information about the configuration required to make the import " "functionality work correctly." msgid "" "The secure \"multihash\" image properties, ``os_hash_algo`` and " "``os_hash_value`` have been available on images since glance version 17.0.0 " "(Rocky). Until this point, the MD5 ``checksum`` property has been populated " "solely for backward compatability. It is not, however, necessary for " "validating downloaded image data." msgstr "" "The secure \"multihash\" image properties, ``os_hash_algo`` and " "``os_hash_value`` have been available on images since glance version 17.0.0 " "(Rocky). Until this point, the MD5 ``checksum`` property has been populated " "solely for backward compatibility. It is not, however, necessary for " "validating downloaded image data." msgid "" "The secure RBAC personas implemented in Wallaby are marked as experimental. " "They will become stable in a future release. You can read more about the " "various personas in keystone's `Administrator Guide `_." msgstr "" "The secure RBAC personas implemented in Wallaby are marked as experimental. " "They will become stable in a future release. You can read more about the " "various personas in keystone's `Administrator Guide `_." msgid "" "The secure hash algorithm used is an operator-configurable setting. See the " "help text for 'hashing_algorithm' in the sample Glance configuration file " "for more information." msgstr "" "The secure hash algorithm used is an operator-configurable setting. See the " "help text for 'hashing_algorithm' in the sample Glance configuration file " "for more information." msgid "" "The ssl support from Glance has been removed as it worked only under PY27 " "which is not anymore supported environment. 
Termination of encrypted " "connections needs to happen externally as soon as the move to PY3 happens. Any " "deployment needing end-to-end encryption would need to put either a reverse " "proxy (using a full-blown http server like Apache or Nginx will cause a " "significant performance hit and we advise using something simpler that " "does not break the http protocol) in front of the service or utilize ssl " "tunneling (like stunnel) between loadbalancers and glance-api." msgstr "" "The SSL support from Glance has been removed as it worked only under PY27 " "which is no longer a supported environment. Termination of encrypted " "connections needs to happen externally as soon as the move to PY3 happens. Any " "deployment needing end-to-end encryption would need to put either a reverse " "proxy (using a full-blown http server like Apache or Nginx will cause a " "significant performance hit and we advise using something simpler that " "does not break the http protocol) in front of the service or utilize SSL " "tunnelling (like stunnel) between loadbalancers and glance-api." msgid "" "The store drivers configuration order in the sample or autogenerated files " "should be expected to be alphabetical as - ``cinder``, ``filesystem``, " "``http``, ``rbd``, ``sheepdog``, ``swift``, ``vmware``." msgstr "" "The store drivers configuration order in the sample or autogenerated files " "should be expected to be alphabetical as - ``cinder``, ``filesystem``, " "``http``, ``rbd``, ``sheepdog``, ``swift``, ``vmware``." msgid "" "The store name prefix ``os_glance_*`` is reserved by Glance for internal " "stores. Glance will refuse to start if a store with this prefix is included " "in the ``enabled_backends`` option." msgstr "" "The store name prefix ``os_glance_*`` is reserved by Glance for internal " "stores. Glance will refuse to start if a store with this prefix is included " "in the ``enabled_backends`` option." msgid "" "The task API is being deprecated and it has been made admin only. If " "deployers of Glance would like to have this API as a public one, it is " "necessary to change the `policy.json` file and remove `role:admin` from " "every `task` related field." msgstr "" "The task API is being deprecated and it has been made admin only. If " "deployers of Glance would like to have this API as a public one, it is " "necessary to change the `policy.json` file and remove `role:admin` from " "every `task` related field." msgid "" "The task API was added to allow users to upload images asynchronously " "and for deployers to have more control over the upload process. Unfortunately, " "this API has not worked the way it was expected to. Therefore, the task API " "has entered a deprecation period and it is meant to be replaced by the new " "import API. This change makes the task API admin only by default so that it " "is not accidentally deployed as a public API." msgstr "" "The task API was added to allow users to upload images asynchronously " "and for deployers to have more control over the upload process. Unfortunately, " "this API has not worked the way it was expected to. Therefore, the task API " "has entered a deprecation period and it is meant to be replaced by the new " "import API. This change makes the task API admin only by default so that it " "is not accidentally deployed as a public API." msgid "" "The unused `modify_task` policy has been deprecated for removal. It was " "never honored or checked as part of an API operation. 
As a result, it has " "been deprecated for removal since overriding it has no direct impact on the " "tasks API, which remains a deprecated, admin-only API." msgstr "" "The unused `modify_task` policy has been deprecated for removal. It was " "never honoured or checked as part of an API operation. As a result, it has " "been deprecated for removal since overriding it has no direct impact on the " "tasks API, which remains a deprecated, admin-only API." msgid "" "The use_user_token, admin_user, admin_password, admin_tenant_name, auth_url, " "auth_strategy and auth_region options in the [DEFAULT] configuration section " "in glance-api.conf are deprecated, and will be removed in the O release. See " "https://wiki.openstack.org/wiki/OSSN/OSSN-0060" msgstr "" "The use_user_token, admin_user, admin_password, admin_tenant_name, auth_url, " "auth_strategy and auth_region options in the [DEFAULT] configuration section " "in glance-api.conf are deprecated, and will be removed in the O release. See " "https://wiki.openstack.org/wiki/OSSN/OSSN-0060" msgid "" "The version 2.6 API is being introduced as EXPERIMENTAL because it is a " "Minimum Viable Product delivery of the functionality described in the " "`refactored image import`_ specification. As an MVP, the responses " "described in that specification are abbreviated in version 2.6. It is " "expected that version 2.6 will be completed in Queens, but at this time, we " "encourage operators to try out the new functionality, but keep in mind its " "EXPERIMENTAL nature." msgstr "" "The version 2.6 API is being introduced as EXPERIMENTAL because it is a " "Minimum Viable Product delivery of the functionality described in the " "`refactored image import`_ specification. As an MVP, the responses " "described in that specification are abbreviated in version 2.6. It is " "expected that version 2.6 will be completed in Queens, but at this time, we " "encourage operators to try out the new functionality, but keep in mind its " "EXPERIMENTAL nature." msgid "" "The weighing mechanism introduced in the Bobcat development cycle can be " "used by operators who would like to prioritize certain stores over others." msgstr "" "The weighing mechanism introduced in the Bobcat development cycle can be " "used by operators who would like to prioritise certain stores over others." msgid "" "The workaround is to continue to use the ``show_multiple_locations`` option " "in a dedicated \"internal\" Glance node that is not accessible to end users. " "We continue to recommend that image locations not be exposed to end users. " "See `OSSN-0065 `_ for more " "information." msgstr "" "The workaround is to continue to use the ``show_multiple_locations`` option " "in a dedicated \"internal\" Glance node that is not accessible to end users. " "We continue to recommend that image locations not be exposed to end users. " "See `OSSN-0065 `_ for more " "information." msgid "" "There are some limitations with this method of deploying Glance and we do " "not recommend its use in production environments at this time. See the " "`Known Issues`_ section of this document for more information." msgstr "" "There are some limitations with this method of deploying Glance and we do " "not recommend its use in production environments at this time. See the " "`Known Issues`_ section of this document for more information." msgid "" "There is no projected timeline for this change, as no one has been able to " "commit time to it. 
The Glance team would be happy to discuss this more with " "anyone interested in working on it." msgstr "" "There is no projected timeline for this change, as no one has been able to " "commit time to it. The Glance team would be happy to discuss this more with " "anyone interested in working on it." msgid "" "There was a bug in the **experimental** zero-downtime database upgrade path " "introduced in the Ocata release that prevented the **experimental** upgrade " "from working. This has been fixed in the Pike release. The bug did not " "affect the normal database upgrade operation." msgstr "" "There was a bug in the **experimental** zero-downtime database upgrade path " "introduced in the Ocata release that prevented the **experimental** upgrade " "from working. This has been fixed in the Pike release. The bug did not " "affect the normal database upgrade operation." msgid "" "There was a typographical error in the properties target for the ``OS::" "Nova::Server`` resource type association in the ``CIM::" "ProcessorAllocationSettingData`` namespace. It has been corrected to " "``scheduler_hints``." msgstr "" "There was a typographical error in the properties target for the ``OS::" "Nova::Server`` resource type association in the ``CIM::" "ProcessorAllocationSettingData`` namespace. It has been corrected to " "``scheduler_hints``." msgid "These are read-only image properties and are not user-modifiable." msgstr "These are read-only image properties and are not user-modifiable." msgid "" "These local directories are used by Glance for the temporary storage of data " "during the interoperable image import process and by the tasks engine. This " "release introduces the ability to instead use a backend filesystem store " "accessed via the glance_store library for this temporary storage. Please " "note the following:" msgstr "" "These local directories are used by Glance for the temporary storage of data " "during the interoperable image import process and by the tasks engine. This " "release introduces the ability to instead use a backend filesystem store " "accessed via the glance_store library for this temporary storage. Please " "note the following:" msgid "" "This change applies only to operators using the VMware datastore or " "filesystem stores" msgstr "" "This change applies only to operators using the VMware datastore or " "filesystem stores" msgid "" "This change applies only to operators who are using multiple image locations" msgstr "" "This change applies only to operators who are using multiple image locations" msgid "This change applies only to the ``store_type_preference`` option" msgstr "This change applies only to the ``store_type_preference`` option" msgid "" "This change is backward compatible, that is, the old names will be " "recognized by the code during the deprecation period. Support for the " "deprecated names will be removed in the **Pike** release" msgstr "" "This change is backward compatible, that is, the old names will be " "recognized by the code during the deprecation period. Support for the " "deprecated names will be removed in the **Pike** release" msgid "" "This deprecation notice also applies to the following configuration options:" msgstr "" "This deprecation notice also applies to the following configuration options:" msgid "" "This experimental feature is optionally exposed as the EXPERIMENTAL Image " "Service API version 2.8. Its use in production systems is currently **not " "supported**. 
We encourage people to use this feature for testing purposes " "and report any issues so that it can be made stable and fully supported in " "the Stein release." msgstr "" "This experimental feature is optionally exposed as the EXPERIMENTAL Image " "Service API version 2.8. Its use in production systems is currently **not " "supported**. We encourage people to use this feature for testing purposes " "and report any issues so that it can be made stable and fully supported in " "the Stein release." msgid "" "This feature is enabled by default, but it is optional. Whether it is " "offered at your installation depends on the value of the " "``enabled_import_methods`` configuration option in the ``glance-api.conf`` " "file (assuming, of course, that you have not disabled image import at your " "site)." msgstr "" "This feature is enabled by default, but it is optional. Whether it is " "offered at your installation depends on the value of the " "``enabled_import_methods`` configuration option in the ``glance-api.conf`` " "file (assuming, of course, that you have not disabled image import at your " "site)." msgid "" "This has necessitated a change in the location and naming convention for " "migration scripts. Developers, operators, and DevOps are strongly " "encouraged to read through the `Database Management`_ section of the Glance " "documentation for details of the changes introduced in the Ocata release. " "Here's a brief summary of the changes:" msgstr "" "This has necessitated a change in the location and naming convention for " "migration scripts. Developers, operators, and DevOps are strongly " "encouraged to read through the `Database Management`_ section of the Glance " "documentation for details of the changes introduced in the Ocata release. " "Here's a brief summary of the changes:" msgid "" "This is a good time to review your Glance ``policy.json`` file to make sure " "that if it contains a ``default`` target, the rule is fairly restrictive " "(\"role:admin\" or \"!\" are good choices). The ``default`` target is used " "when the policy engine cannot find the target it's looking for. This can " "happen when a new policy is introduced but the policy file in use is from a " "prior release." msgstr "" "This is a good time to review your Glance ``policy.json`` file to make sure " "that if it contains a ``default`` target, the rule is fairly restrictive " "(\"role:admin\" or \"!\" are good choices). The ``default`` target is used " "when the policy engine cannot find the target it's looking for. This can " "happen when a new policy is introduced but the policy file in use is from a " "prior release." msgid "" "This is in addition to the chunked transfer encoding problems addressed by " "`Bug 1703856`_ and will be more difficult to fix. (Additionally, as far as " "we are aware, the fix for `Bug 1703856`_ has never been tested at scale.) " "Briefly, Glance tasks are run by the API service and would have to be split " "out into a different service so that API alone would run under uWSGI. The " "Glance project team did not have sufficient testing and development " "resources during the Queens cycle to attempt this (or even to discuss " "whether this is in fact a good idea)." msgstr "" "This is in addition to the chunked transfer encoding problems addressed by " "`Bug 1703856`_ and will be more difficult to fix. (Additionally, as far as " "we are aware, the fix for `Bug 1703856`_ has never been tested at scale.) 
" "Briefly, Glance tasks are run by the API service and would have to be split " "out into a different service so that API alone would run under uWSGI. The " "Glance project team did not have sufficient testing and development " "resources during the Queens cycle to attempt this (or even to discuss " "whether this is in fact a good idea)." msgid "This option has had no effect since the removal of native SSL support." msgstr "This option has had no effect since the removal of native SSL support." msgid "" "This point release contains minor changes to keep the Ocata release of " "Glance stable with respect to current operating system packages." msgstr "" "This point release contains minor changes to keep the Ocata release of " "Glance stable with respect to current operating system packages." msgid "" "This point release contains minor changes to keep the Pike release of Glance " "stable with respect to current operating system packages." msgstr "" "This point release contains minor changes to keep the Pike release of Glance " "stable with respect to current operating system packages." msgid "" "This prevents roles that should not have access to these APIs from " "performing the APIs associated with the actions above." msgstr "" "This prevents roles that should not have access to these APIs from " "performing the APIs associated with the actions above." msgid "This release also bumps the Images API CURRENT version to 2.9" msgstr "This release also bumps the Images API CURRENT version to 2.9" msgid "" "This release also contains some feature work, life improving changes and bug " "fixes. Please refer the rest of the release notes and docs for details." msgstr "" "This release also contains some feature work, life improving changes and bug " "fixes. Please refer the rest of the release notes and docs for details." msgid "" "This release brings additional functionality to the stores API. The stores " "detail API helps in providing the store specific information." msgstr "" "This release brings additional functionality to the stores API. The stores " "detail API helps in providing the store specific information." msgid "" "This release brings additional functionality to the unified quota work done " "in the previous release. A usage API is now available, which provides a way " "for users to see their current quota limits and their active resource usage " "towards them. For more information, see the discovery section in the `api-" "ref `_." msgstr "" "This release brings additional functionality to the unified quota work done " "in the previous release. A usage API is now available, which provides a way " "for users to see their current quota limits and their active resource usage " "towards them. For more information, see the discovery section in the `api-" "ref `_." msgid "" "This release brings expansion in the functionality of stores-detail API. " "The stores detail API will list the way each store is configured, whereas " "previously this worked only for rbd store. The API remains admin-only by " "default as it exposes backend information." msgstr "" "This release brings expansion in the functionality of stores-detail API. The " "store's detail API will list the way each store is configured, whereas " "previously this worked only for the RBD store. The API remains admin-only by " "default as it exposes backend information." 
msgid "" "This release brings the additional functionality of adding new location to a " "``queued`` state image which will replace the image-update mechanism for " "consumers like cinder and nova to address OSSN-0090 and OSSN-0065." msgstr "" "This release brings the additional functionality of adding new location to a " "``queued`` state image which will replace the image-update mechanism for " "consumers like Cinder and Nova to address OSSN-0090 and OSSN-0065." msgid "" "This release brings the additional functionality of get locations associated " "to an image accessible to only service users i.e., consumers like cinder and " "nova for OSSN-0090 and OSSN-0065." msgstr "" "This release brings the additional functionality of get locations associated " "to an image accessible to only service users i.e., consumers like Cinder and " "Nova for OSSN-0090 and OSSN-0065." msgid "This release has impact on API behavior." msgstr "This release has impact on API behaviour." msgid "" "This release implements the Glance spec `Secure Hash Algorithm Support " "`_ (also known as \"multihash\"). This feature " "supplements the current 'checksum' image property with a self-describing " "secure hash. The self-description consists of two new image properties:" msgstr "" "This release implements the Glance spec `Secure Hash Algorithm Support " "`_ (also known as \"multihash\"). This feature " "supplements the current 'checksum' image property with a self-describing " "secure hash. The self-description consists of two new image properties:" msgid "" "This release implements the Glance spec `Secure Hash Algorithm Support " "`_, which introduces a self-describing \"multihash\" " "to the image-show response. This feature supplements the current 'checksum' " "image property with a self-describing secure hash. The default hashing " "algorithm is SHA-512, which is currently considered secure. In the event " "that algorithm is compromised, you will immediately be able to begin using a " "different algorithm (as long as it's supported by the Python 'hashlib' " "library and has output that fits in 128 characters) by modifying the value " "of the 'hashing_algorithm' configuration option and either restarting or " "issuing a SIGHUP to Glance." msgstr "" "This release implements the Glance spec `Secure Hash Algorithm Support " "`_, which introduces a self-describing \"multihash\" " "to the image-show response. This feature supplements the current 'checksum' " "image property with a self-describing secure hash. The default hashing " "algorithm is SHA-512, which is currently considered secure. In the event " "that algorithm is compromised, you will immediately be able to begin using a " "different algorithm (as long as it's supported by the Python 'hashlib' " "library and has output that fits in 128 characters) by modifying the value " "of the 'hashing_algorithm' configuration option and either restarting or " "issuing a SIGHUP to Glance." msgid "" "This release introduces new APIs for cache related operations. This new " "version of the cache API will help administrators to cache images on " "dedicated glance nodes as well. For more information, see the ``Cache " "Manage`` section in the `api-ref-guide `_." msgstr "" "This release introduces new APIs for cache related operations. This new " "version of the cache API will help administrators to cache images on " "dedicated Glance nodes as well. For more information, see the ``Cache " "Manage`` section in the `api-ref-guide `_." 
msgid "" "This release of OpenStack Glance introduces 2 new API versions. Images API " "v2.7 adds support and modifications for the Hidden Images and Multihash " "features introduced during Rocky cycle. Version 2.8 is included as an " "optional EXPERIMENTAL API for testing and preparing for multiple back-end " "support." msgstr "" "This release of OpenStack Glance introduces 2 new API versions. Images API " "v2.7 adds support and modifications for the Hidden Images and Multihash " "features introduced during Rocky cycle. Version 2.8 is included as an " "optional EXPERIMENTAL API for testing and preparing for multiple back-end " "support." msgid "" "This release prevents non-admin user to change 'size' and 'checksum' " "properties of an image after it has been deactivated via Images API v1." msgstr "" "This release prevents non-admin user to change 'size' and 'checksum' " "properties of an image after it has been deactivated via Images API v1." msgid "" "This release provides an EXPERIMENTAL implementation of the Glance spec " "`Multi-Store Backend Support `_, which allows an " "operator to configure multiple backing stores so that end users may direct " "image data to be stored in a specific backend. See `Multi Store Support " "`_ in the " "Glance Administration Guide for more information." msgstr "" "This release provides an EXPERIMENTAL implementation of the Glance spec " "`Multi-Store Backend Support `_, which allows an " "operator to configure multiple backing stores so that end users may direct " "image data to be stored in a specific backend. See `Multi Store Support " "`_ in the " "Glance Administration Guide for more information." msgid "" "This release removes endpoints and config options related to glance-" "registry. Including but not limited to config option 'data-api' which has no " "production supported options left. SimpleDB has not been supported since " "moving DB migrations to alembic and registry is removed. All registry " "specific options and config files have been removed. 'glance-registry' " "command has been removed." msgstr "" "This release removes endpoints and config options related to glance-" "registry. Including but not limited to config option 'data-api' which has no " "production supported options left. SimpleDB has not been supported since " "moving DB migrations to alembic and registry is removed. All registry " "specific options and config files have been removed. 'glance-registry' " "command has been removed." msgid "" "This renders the configuration options incapable of taking certain values " "that may have been accepted before but were actually invalid." msgstr "" "This renders the configuration options incapable of taking certain values " "that may have been accepted before but were actually invalid." msgid "" "This will result into a non-backward compatible experience before and after " "**Newton** release, for users using ``add`` feature to image locations." msgstr "" "This will result into a non-backward compatible experience before and after " "**Newton** release, for users using ``add`` feature to image locations." msgid "" "Those images currently with 'private' visibility (that is, images for which " "'is_public' is False in the database) and that have **no** image members, " "will have their visibility set to 'private'." msgstr "" "Those images currently with 'private' visibility (that is, images for which " "'is_public' is False in the database) and that have **no** image members, " "will have their visibility set to 'private'." 
msgid "" "Thus, we are announcing the DEPRECATION in this release of the image " "``checksum`` property. It will remain as an image property, but beginning " "with the Victoria release, the ``checksum`` will *not* be populated on new " "images." msgstr "" "Thus, we are announcing the DEPRECATION in this release of the image " "``checksum`` property. It will remain as an image property, but beginning " "with the Victoria release, the ``checksum`` will *not* be populated on new " "images." msgid "" "To enable this functionality, you must specify `glance-api.conf [DEFAULT] " "enable_secure_rbac=True` and `glance-api.conf [oslo_policy] " "enforce_new_defaults=True`. Glance will refuse to start if misconfigured." msgstr "" "To enable this functionality, you must specify `glance-api.conf [DEFAULT] " "enable_secure_rbac=True` and `glance-api.conf [oslo_policy] " "enforce_new_defaults=True`. Glance will refuse to start if misconfigured." msgid "" "To partially fix an important image locations bug 1587985, an API impacting " "change has been merged into Glance." msgstr "" "To partially fix an important image locations bug 1587985, an API impacting " "change has been merged into Glance." msgid "" "To summarize: end users do **not** need access to the Tasks API in order to " "use the new interoperable image import process. They do, however, need " "permission to access internal Glance task objects." msgstr "" "To summarize: end users do **not** need access to the Tasks API in order to " "use the new interoperable image import process. They do, however, need " "permission to access internal Glance task objects." msgid "" "To support the Block Storage service (Cinder) upload-volume-to-image action " "when the volume is an encrypted volume type, when such an image is deleted, " "Glance will now contact the OpenStack Key Management service (Barbican) and " "request it to delete the associated encryption key. Two extra properties " "must be set on the image for this to work: ``cinder_encryption_key_id`` " "(whose value is the identifier in the OpenStack Key Management service for " "the encryption key used to encrypt the volume) and " "``cinder_encryption_key_deletion_policy`` (whose value may be either " "``on_image_deletion`` or ``do_not_delete``). Please note the following:" msgstr "" "To support the Block Storage service (Cinder) upload-volume-to-image action " "when the volume is an encrypted volume type, when such an image is deleted, " "Glance will now contact the OpenStack Key Management service (Barbican) and " "request it to delete the associated encryption key. Two extra properties " "must be set on the image for this to work: ``cinder_encryption_key_id`` " "(whose value is the identifier in the OpenStack Key Management service for " "the encryption key used to encrypt the volume) and " "``cinder_encryption_key_deletion_policy`` (whose value may be either " "``on_image_deletion`` or ``do_not_delete``). Please note the following:" msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "" "Train release includes a change to how cache prefetching works. As the " "prefetcher was one of the last components still relying to the glance-" "registry the requirement was removed by implementing the prefetcher as part " "of glance-api. Crontab based prefetcher is not available anymore and the new " "prefetching will be set up through glance-api.conf." msgstr "" "Train release includes a change to how cache prefetching works. 
As the " "prefetcher was one of the last components still relying to the glance-" "registry the requirement was removed by implementing the prefetcher as part " "of glance-api. Crontab based prefetcher is not available any more and the " "new prefetching will be set up through glance-api.conf." msgid "" "Train release includes multiple important milestones in Glance development " "priorities." msgstr "" "Train release includes multiple important milestones in Glance development " "priorities." msgid "Translations have been synced from Zanata." msgstr "Translations have been synced from Zanata." msgid "Translations have been updated." msgstr "Translations have been updated." msgid "" "Until now every run of the oslo-config-generator resulted in random ordering " "of the store drivers configuration. After **Newton** release this order will " "remain consistent." msgstr "" "Until now every run of the oslo-config-generator resulted in random ordering " "of the store drivers configuration. After **Newton** release this order will " "remain consistent." msgid "" "Until now, no image status checks were in place while **adding** a location " "on it. In some circumstances, this may result in a bad user experience. It " "may also cause problems for a security team evaluating the condition of an " "image in ``deactivated`` status." msgstr "" "Until now, no image status checks were in place while **adding** a location " "on it. In some circumstances, this may result in a bad user experience. It " "may also cause problems for a security team evaluating the condition of an " "image in ``deactivated`` status." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgstr "" "Use of JSON policy files was deprecated by the ``oslo.policy`` library " "during the Victoria development cycle. As a result, this deprecation is " "being noted in the Wallaby cycle with an anticipated future removal of " "support by ``oslo.policy``. As such operators will need to convert to YAML " "policy files. Please see the upgrade notes for details on migration of any " "custom policy files." msgid "" "Use of the plugin requires configuration as described in the `The Image " "Property Injection Plugin`_ section of the Glance Admin Guide." msgstr "" "Use of the plugin requires configuration as described in the `The Image " "Property Injection Plugin`_ section of the Glance Admin Guide." msgid "" "Use the v1 API to update the image so that ``is_public`` is False. This " "will reset the image's visibility to 'shared', and it will now accept member " "operations." msgstr "" "Use the v1 API to update the image so that ``is_public`` is False. This " "will reset the image's visibility to 'shared', and it will now accept member " "operations." msgid "" "Use the v2 API to change the visibility of the image to 'shared'. Then it " "will accept members in either the v1 or v2 API." msgstr "" "Use the v2 API to change the visibility of the image to 'shared'. Then it " "will accept members in either the v1 or v2 API." 
msgid "Users can follow workflow execution with 2 new reserved properties:" msgstr "Users can follow workflow execution with 2 new reserved properties:" msgid "" "Users should instead rely on the secure \"multihash\" to validate image " "downloads. The python-glanceclient, for example, has been using multihash " "validation (with an optional MD5 fallback) since version 2.13.0 (Rocky)." msgstr "" "Users should instead rely on the secure \"multihash\" to validate image " "downloads. The python-glanceclient, for example, has been using multihash " "validation (with an optional MD5 fallback) since version 2.13.0 (Rocky)." msgid "Using db check" msgstr "Using db check" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "" "Ussuri release includes multiple important milestones in Glance development " "priorities." msgstr "" "Ussuri release includes multiple important milestones in Glance development " "priorities." msgid "" "Values which do not comply with the new restrictions will prevent the " "service from starting. The logs will contain a message indicating the " "problematic configuration option and the reason why the supplied value has " "been rejected." msgstr "" "Values which do not comply with the new restrictions will prevent the " "service from starting. The logs will contain a message indicating the " "problematic configuration option and the reason why the supplied value has " "been rejected." msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Visibility migration of current images" msgstr "Visibility migration of current images" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "We encourage you to compare any existing overrides in your deployment with " "the new defaults. You can use `oslopolicy-sample-generator --namespace " "glance` to generate the default policies and use them for comparison." msgstr "" "We encourage you to compare any existing overrides in your deployment with " "the new defaults. You can use `oslopolicy-sample-generator --namespace " "glance` to generate the default policies and use them for comparison." msgid "" "We have tried to minimize the impact upon end users, but want to point out " "some issues to be aware of." msgstr "" "We have tried to minimise the impact upon end users, but want to point out " "some issues to be aware of." msgid "" "We mention this so that you can be aware of this situation in your own " "testing." msgstr "" "We mention this so that you can be aware of this situation in your own " "testing." msgid "" "We recommend that all operators adopt the policy settings just described " "independently of the decision whether to expose the EXPERIMENTAL version 2.6 " "API." msgstr "" "We recommend that all operators adopt the policy settings just described " "independently of the decision whether to expose the EXPERIMENTAL version 2.6 " "API." msgid "" "We recommend to enable the both scope as well new defaults together " "otherwise you may experience some late failures with unclear error messages." msgstr "" "We recommend enabling both scopes as well new defaults together otherwise " "you may experience some late failures with unclear error messages." msgid "" "We renew that recommendation for the Queens release. 
In particular, Glance " "tasks (which are required for the interoperable image import functionality) " "do not execute when Glance is run under uWSGI (which is the OpenStack " "recommended way to run WSGI applications hosted by a web server)." msgstr "" "We renew that recommendation for the Queens release. In particular, Glance " "tasks (which are required for the interoperable image import functionality) " "do not execute when Glance is run under uWSGI (which is the OpenStack " "recommended way to run WSGI applications hosted by a web server)." msgid "" "We strongly encourage operators to modify their ``glance-api.conf`` files " "immediately to use the **new** names" msgstr "" "We strongly encourage operators to modify their ``glance-api.conf`` files " "immediately to use the **new** names" msgid "" "What this option does is to grant complete admin access to any authenticated " "user with a particular role. *This overrides any policy rules configured in " "the policy configuration file.* While everything will behave as expected if " "you are also using the default policy settings, this setting may cause " "anomalous behavior when you are configuring custom policies." msgstr "" "What this option does is to grant complete admin access to any authenticated " "user with a particular role. *This overrides any policy rules configured in " "the policy configuration file.* While everything will behave as expected if " "you are also using the default policy settings, this setting may cause " "anomalous behaviour when you are configuring custom policies." msgid "" "When ``enable_image_import`` is **True**, a new import-method, ``web-" "download`` is available. (In Pike, only ``glance-direct`` was offered.) " "Which import-methods you offer can be configured using the " "``enabled_import_methods`` option in the ``glance-api.conf`` file." msgstr "" "When ``enable_image_import`` is **True**, a new import-method, ``web-" "download`` is available. (In Pike, only ``glance-direct`` was offered.) " "Which import-methods you offer can be configured using the " "``enabled_import_methods`` option in the ``glance-api.conf`` file." msgid "" "When delayed delete is enabled, operators are able to recover image records " "if the scrubber has been stopped before the data removal interval. While the " "image metadata is still not preserved in these cases, this provides a way to " "save the image data on accidental deletes." msgstr "" "When delayed delete is enabled, operators are able to recover image records " "if the scrubber has been stopped before the data removal interval. While the " "image metadata is still not preserved in these cases, this provides a way to " "save the image data on accidental deletes." msgid "" "When the Glance image cache is being used, the CURRENT version of the Image " "service API, as indicated in the ``GET /versions`` response, is 2.16." msgstr "" "When the Glance image cache is being used, the CURRENT version of the Image " "service API, as indicated in the ``GET /versions`` response, is 2.16." msgid "" "When using the Interoperable Image Import workflow, cloud operators can now " "enable automatic image conversion to the desired format. When the plugin is " "enabled, end-users do not have any input into its operation, but their local " "checksum might not match the checksums recorded in Glance." msgstr "" "When using the Interoperable Image Import workflow, cloud operators can now " "enable automatic image conversion to the desired format. 
When the plugin is " "enabled, end-users do not have any input into its operation, but their local " "checksum might not match the checksums recorded in Glance." msgid "" "When using the multiple stores feature, each filesystem store **must** be " "configured with a different value for the ``filesystem_store_datadir`` " "option. This is not currently enforced in the code." msgstr "" "When using the multiple stores feature, each filesystem store **must** be " "configured with a different value for the ``filesystem_store_datadir`` " "option. This is not currently enforced in the code." msgid "" "While fixing a race condition issue during Victoria, we started updating the " "'message' property of the task, which helps calculate the time based on the " "last updated time of the task to burst the lock, as well as show how much " "data has been copied for that image. As glance task APIs are restricted from " "use by normal users, we are adding a new API, /v2/images/{image_id}/tasks, " "which will return all tasks associated with that image. In addition to task " "information, this API will also return the `request-id` and `user-id` to " "help users in debugging." msgstr "" "While fixing a race condition issue during Victoria, we started updating the " "'message' property of the task, which helps calculate the time based on the " "last updated time of the task to burst the lock, as well as show how much " "data has been copied for that image. As glance task APIs are restricted from " "use by normal users, we are adding a new API, /v2/images/{image_id}/tasks, " "which will return all tasks associated with that image. In addition to task " "information, this API will also return the `request-id` and `user-id` to " "help users in debugging." msgid "" "While the 2.6 API is CURRENT, whether the interoperable image import " "functionality it makes available is exposed to end users is controlled by a " "configuration option, ``enable_image_import``. Although this option existed " "in the previous release, its effect is slightly different in Queens." msgstr "" "While the 2.6 API is CURRENT, whether the interoperable image import " "functionality it makes available is exposed to end users is controlled by a " "configuration option, ``enable_image_import``. Although this option existed " "in the previous release, its effect is slightly different in Queens." msgid "" "With the deprecation of the Images (Glance) version 1 API in the Newton " "release, it is subject to removal on or after the Pike release. The " "configuration options specific to the Images (Glance) v1 API have also been " "deprecated and are subject to removal. An indirectly related configuration " "option enable_v2_api has been deprecated too as it becomes redundant once " "the Images (Glance) v1 API is removed. Appropriate warning messages have " "been set up for the deprecated configuration options and when the Images " "(Glance) v1 API is enabled (being used). Operators are advised to deploy the " "Images (Glance) v2 API. The standard OpenStack deprecation policy will be " "followed for the removals." msgstr "" "With the deprecation of the Images (Glance) version 1 API in the Newton " "release, it is subject to removal on or after the Pike release. The " "configuration options specific to the Images (Glance) v1 API have also been " "deprecated and are subject to removal. An indirectly related configuration " "option enable_v2_api has been deprecated too as it becomes redundant once " "the Images (Glance) v1 API is removed. 
Appropriate warning messages have " "been set up for the deprecated configuration options and when the Images " "(Glance) v1 API is enabled (being used). Operators are advised to deploy the " "Images (Glance) v2 API. The standard OpenStack deprecation policy will be " "followed for the removals." msgid "" "With the introduction of the ``web-download`` import method, we consider the " "Image Service v2 API to have reached feature parity with the DEPRECATED v1 " "API in all important respects. Support for the Image Service API v1 ends " "with the Queens release. The `v1 API was deprecated in Newton`_ and will be " "removed from the codebase at the beginning of the Rocky development cycle. " "Please plan appropriately." msgstr "" "With the introduction of the ``web-download`` import method, we consider the " "Image Service v2 API to have reached feature parity with the DEPRECATED v1 " "API in all important respects. Support for the Image Service API v1 ends " "with the Queens release. The `v1 API was deprecated in Newton`_ and will be " "removed from the codebase at the beginning of the Rocky development cycle. " "Please plan appropriately." msgid "" "With the previous default value, any user with the ``admin`` role could act " "in an administrative context *regardless of what your policy file defined as " "the administrative context*. And this might not be a problem because " "usually the ``admin`` role is not assigned to \"regular\" end users. It " "does become a problem, however, when operators attempt to configure " "different gradations of administrator." msgstr "" "With the previous default value, any user with the ``admin`` role could act " "in an administrative context *regardless of what your policy file defined as " "the administrative context*. And this might not be a problem because " "usually the ``admin`` role is not assigned to \"regular\" end users. It " "does become a problem, however, when operators attempt to configure " "different gradations of administrator." msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "" "You can now list all images that are available to you. Use the 'all' " "visibility option." msgstr "" "You can now list all images that are available to you. Use the 'all' " "visibility option." msgid "" "You may set the ``timeout`` option in the ``keystone_authtoken`` group in " "the **glance-api.conf** file." msgstr "" "You may set the ``timeout`` option in the ``keystone_authtoken`` group in " "the **glance-api.conf** file." msgid "You may upgrade these definitions using:" msgstr "You may upgrade these definitions using:" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "[`Community Goal `_] Support has been added for developers to write pre-upgrade " "checks. Operators can run these checks using ``glance-status upgrade " "check``. This allows operators to be more confident when upgrading their " "deployments by having a tool that automates programmable checks against the " "deployment configuration or dataset." msgstr "" "[`Community Goal `_] Support has been added for developers to write pre-upgrade " "checks. Operators can run these checks using ``glance-status upgrade " "check``. This allows operators to be more confident when upgrading their " "deployments by having a tool that automates programmable checks against the " "deployment configuration or dataset." 
msgid "`Bug #1972666 `_:" msgstr "`Bug #1972666 `_:" msgid "" "`Bug #1979699 `_: Fix the " "``glance-cache-prefetcher`` command to set up access to backend stores when " "the multi store feature is used." msgstr "" "`Bug #1979699 `_: Fix the " "``glance-cache-prefetcher`` command to set up access to backend stores when " "the multi store feature is used." msgid "" "`Bug #2054575 `_: Fixed the " "issue when cinder uploads a volume to glance in the optimized path and " "glance rejects the request with invalid location. Now we convert the old " "location format sent by cinder into the new location format supported by " "multi store, hence allowing volumes to be uploaded in an optimized way." msgstr "" "`Bug #2054575 `_: Fixed the " "issue when Cinder uploads a volume to Glance in the optimised path and " "Glance rejects the request with invalid location. Now we convert the old " "location format sent by Cinder into the new location format supported by " "multi store, hence allowing volumes to be uploaded in an optimised way." msgid "" "`Bug #2059809 `_: Fixed " "issue where a qcow2 format image with an external data file could expose " "host information. Such an image format with an external data file will be " "rejected from glance. To achieve the same, format_inspector has been " "extended by adding safety checks for qcow2 and vmdk files in glance. Unsafe " "qcow and vmdk files will be rejected by pre-examining them with a format " "inspector to ensure safe configurations prior to any qemu-img operations." msgstr "" "`Bug #2059809 `_: Fixed " "issue where a qcow2 format image with an external data file could expose " "host information. Such an image format with an external data file will be " "rejected from Glance. To achieve the same, format_inspector has been " "extended by adding safety checks for qcow2 and VMDK files in Glance. Unsafe " "qcow and VMDK files will be rejected by pre-examining them with a format " "inspector to ensure safe configurations prior to any qemu-img operations." msgid "" "`Bug #2073945 `_: Fixed " "issue with VM creation in DCN cases with RBD backend where an edge node " "doesn't have the store defined which is part of the image locations and the " "operation fails." msgstr "" "`Bug #2073945 `_: Fixed " "issue with VM creation in DCN cases with RBD backend where an edge node " "doesn't have the store defined which is part of the image locations and the " "operation fails." msgid "" "``all_stores_must_succeed``: Control wether the import have to succeed in " "all stores." msgstr "" "``all_stores_must_succeed``: Control whether the import have to succeed in " "all stores." msgid "``all_stores``: To import the data in all configured stores." msgstr "``all_stores``: To import the data in all configured stores." msgid "``delayed_delete``" msgstr "``delayed_delete``" msgid "``delete_metadef_namespace``" msgstr "``delete_metadef_namespace``" msgid "``delete_metadef_object``" msgstr "``delete_metadef_object``" msgid "``delete_metadef_tag``" msgstr "``delete_metadef_tag``" msgid "``delete_metadef_tags``" msgstr "``delete_metadef_tags``" msgid "``enable_image_import``" msgstr "``enable_image_import``" msgid "``enable_image_import`` is **True** by default (in Pike it was False)" msgstr "``enable_image_import`` is **True** by default (in Pike it was False)" msgid "" "``glance-cache-manage`` and precaching is back; during the cycle we fixed " "issues that caused cache management being impossible. 
The prefetcher code " "was moved into glance-api to break the dependency on glance-registry and " "does not run under cron anymore." msgstr "" "``glance-cache-manage`` and precaching is back; during the cycle we fixed " "issues that made cache management impossible. The prefetcher code " "was moved into glance-api to break the dependency on glance-registry and " "does not run under cron any more." msgid "" "``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]``" msgstr "" "``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]``" msgid "" "``glance-scrubber`` now supports restoring the image's status from " "`pending_delete` to `active`. The usage is `glance-scrubber --restore `. Please make sure the ``glance-scrubber`` daemon is stopped before " "restoring the image to avoid image data inconsistency." msgstr "" "``glance-scrubber`` now supports restoring the image's status from " "`pending_delete` to `active`. The usage is `glance-scrubber --restore `. Please make sure the ``glance-scrubber`` daemon is stopped before " "restoring the image to avoid image data inconsistency." msgid "``node_staging_uri``" msgstr "``node_staging_uri``" msgid "" "``os_glance_failed_import``: Each time an import in a store fails, it is " "added to this list." msgstr "" "``os_glance_failed_import``: Each time an import in a store fails, it is " "added to this list." msgid "" "``os_glance_importing_to_stores``: list of stores that have not yet been " "processed." msgstr "" "``os_glance_importing_to_stores``: list of stores that have not yet been " "processed." msgid "" "``os_hash_algo`` - this contains the name of the secure hash algorithm used " "to generate the value on this image" msgstr "" "``os_hash_algo`` - this contains the name of the secure hash algorithm used " "to generate the value on this image" msgid "" "``os_hash_value`` - this is the hexdigest computed by applying the secure " "hash algorithm named in the ``os_hash_algo`` property to the image data" msgstr "" "``os_hash_value`` - this is the hexdigest computed by applying the secure " "hash algorithm named in the ``os_hash_algo`` property to the image data" msgid "``remove_metadef_property``" msgstr "``remove_metadef_property``" msgid "``remove_metadef_resource_type_association``" msgstr "``remove_metadef_resource_type_association``" msgid "``scrub_pool_size``" msgstr "``scrub_pool_size``" msgid "``scrub_time``" msgstr "``scrub_time``" msgid "" "``stores``: List containing the store ids to import the image binary data to." msgstr "" "``stores``: List containing the store ids to import the image binary data to." 
msgid "``wakeup_time``" msgstr "``wakeup_time``" msgid "" "a new *list stores* call, `GET /v2/info/stores `_" msgstr "" "a new *list stores* call, `GET /v2/info/stores `_" msgid "" "a new ``OpenStack-image-store-ids`` header in the `create image `_ response" msgstr "" "a new ``OpenStack-image-store-ids`` header in the `create image `_ response" msgid "" "an ``X-Image-Meta-Store`` header may be included with the `image data upload " "`_ request" msgstr "" "an ``X-Image-Meta-Store`` header may be included with the `image data upload " "`_ request" msgid "" "an ``X-Image-Meta-Store`` header may be included with the `image import " "`_ request" msgstr "" "an ``X-Image-Meta-Store`` header may be included with the `image import " "`_ request" msgid "bug 1532243: glance fails silently if a task flow can not be loaded" msgstr "bug 1532243: glance fails silently if a task flow can not be loaded" msgid "" "bug 1533949: Glance tasks missing configuration item \"conversion_format\"" msgstr "" "bug 1533949: Glance tasks missing configuration item \"conversion_format\"" msgid "" "bug 1535231: md-meta with case insensitive string has problem during creating" msgstr "" "bug 1535231: md-meta with case insensitive string has problem during creating" msgid "bug 1543937: db-purge fails for very large number" msgstr "bug 1543937: db-purge fails for very large number" msgid "bug 1555275: Tags set changes on delete" msgstr "bug 1555275: Tags set changes on delete" msgid "bug 1557495: Possible race conditions during status change" msgstr "bug 1557495: Possible race conditions during status change" msgid "bug 1558683: Versions endpoint does not support X-Forwarded-Proto" msgstr "bug 1558683: Versions endpoint does not support X-Forwarded-Proto" msgid "bug 1568723: secure_proxy_ssl_header not in sample configuration files" msgstr "bug 1568723: secure_proxy_ssl_header not in sample configuration files" msgid "" "bug 1568894: glance_store options missing in glance-scrubber.conf and glance-" "cache.conf sample files" msgstr "" "bug 1568894: glance_store options missing in glance-scrubber.conf and glance-" "cache.conf sample files" msgid "" "bug 1570789: Metadefs API returns 500 error when 4 byte unicode character is " "passed" msgstr "" "bug 1570789: Metadefs API returns 500 error when 4 byte Unicode character is " "passed" msgid "" "bug 1580848: There's no exception when import task is created without " "properties" msgstr "" "bug 1580848: There's no exception when import task is created without " "properties" msgid "bug 1582304: Allow tests to run when http proxy is set" msgstr "bug 1582304: Allow tests to run when HTTP proxy is set" msgid "bug 1584076: Swift ACLs disappears on v1 Glance images" msgstr "bug 1584076: Swift ACLs disappears on v1 Glance images" msgid "" "bug 1584350: etc/glance-registry.conf sample file has redundant store section" msgstr "" "bug 1584350: etc/glance-registry.conf sample file has redundant store section" msgid "" "bug 1584415: Listing images with the created_at and updated_at filters fails " "if an operator is not specified" msgstr "" "bug 1584415: Listing images with the created_at and updated_at filters fails " "if an operator is not specified" msgid "bug 1585584: Glare v0.1 is unable to create public artifact draft" msgstr "bug 1585584: Glare v0.1 is unable to create public artifact draft" msgid "" "bug 1585917: member-create will raise 500 error if member-id is greater than " "255 characters" msgstr "" "bug 1585917: member-create will raise 500 error if member-id is greater than " 
"255 characters" msgid "" "bug 1590608: Services should use http_proxy_to_wsgi middleware from oslo." "middleware library" msgstr "" "bug 1590608: Services should use http_proxy_to_wsgi middleware from oslo." "middleware library" msgid "" "bug 1591004: Unable to download image with no checksum when cache is enabled" msgstr "" "bug 1591004: Unable to download image with no checksum when cache is enabled" msgid "" "bug 1593177: The default policy needs to be admin for safer default " "deployment scenarios" msgstr "" "bug 1593177: The default policy needs to be admin for safer default " "deployment scenarios" msgid "" "bug 1598985: glance-replicator compare output should show image name in " "addition to image id for missing images" msgstr "" "bug 1598985: glance-replicator compare output should show image name in " "addition to image id for missing images" msgid "" "bug 1599169: glance-replicator size raises object of type 'NoneType' has no " "len() exception when no args provided" msgstr "" "bug 1599169: glance-replicator size raises object of type 'NoneType' has no " "len() exception when no args provided" msgid "bug 1599192: glance-replicator needs to display human-readable size" msgstr "bug 1599192: glance-replicator needs to display human-readable size" msgid "bug 1602081: Glance needs to use oslo.context's policy dict" msgstr "bug 1602081: Glance needs to use oslo.context's policy dict" msgid "" "bug 1609571: version negotiation api middleware was NOT up to date to " "include v2.3" msgstr "" "bug 1609571: version negotiation API middleware was NOT up to date to " "include v2.3" msgid "bug 1612341: Add cpu thread pinning flavor metadef" msgstr "bug 1612341: Add CPU thread pinning flavour metadef" msgid "bug 1617258: Image signature base64 needs to wrap lines" msgstr "bug 1617258: Image signature base64 needs to wrap lines" msgid "the options in the ``[task]`` group" msgstr "the options in the ``[task]`` group" msgid "the options in the ``[taskflow_executor]`` group" msgstr "the options in the ``[taskflow_executor]`` group" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7862947 glance-29.0.0/releasenotes/source/locale/ja/0000775000175000017500000000000000000000000020700 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000022465 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000664000175000017500000036150600000000000025531 0ustar00zuulzuul00000000000000# Akihito INOH , 2018. #zanata # Shu Muto , 2018. 
#zanata msgid "" msgstr "" "Project-Id-Version: Glance Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-03-01 15:42+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-15 08:08+0000\n" "Last-Translator: \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "'community' - the image is available for consumption by all users" msgstr "'community' - すべてのユーザーがイメージを利用できます。" msgid "'private' - the image is accessible only to its owner" msgstr "'private' - 所有者だけがイメージにアクセスできます。" msgid "" "'public' - reserved by default for images supplied by the operator for the " "use of all users" msgstr "" "'public' - すべてのユーザーにオペレーターから提供されるイメージで、デフォルト" "で設定されます。" msgid "" "'shared' - the image is completely accessible to the owner and available for " "consumption by any image members" msgstr "" "'shared' - 所有者は完全なアクセス権を持ち、メンバーが利用可能なイメージです。" msgid "" "**Adding** locations is disallowed on the following image statuses - " "``saving``, ``deactivated``, ``deleted``, ``pending_delete``, ``killed``." msgstr "" "次の状態のイメージには、場所の**追加**は許可されていません。``saving``、" "``deactivated``、``deleted``、``pending_delete``、``killed``" msgid "" "**Experimental** zero-downtime database upgrade using an expand-migrate-" "contract series of operations is available." msgstr "" "**実験的** expand-migrate-contract の一連の操作を使用した停止時間なしのデータ" "ベースアップグレードが利用可能です。" msgid "" "*File system store operators*: the old name, now **DEPRECATED**, was " "``filesystem``. The **new** name, used in both glance and glance_store, is " "``file``" msgstr "" "*ファイルシステムストアオペレーター*: 現在**廃止予定**の古い名前は、" "``filesystem`` でした。glance と glance_store の両方で使用される**新しい**名" "前は、 ``file`` です。" msgid "" "*VMware datastore operators*: The old name, now **DEPRECATED**, was " "``vmware_datastore``. The **new** name, used in both glance and " "glance_store, is ``vmware``" msgstr "" "*VMware データストアオペレーター*: 現在**廃止予定**の古い名前は、" "``vmware_datastore`` でした。glance と glance_store の両方で使用される**新し" "い**名前は、 ``vmware`` です。" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.2" msgstr "11.0.2" msgid "12.0.0" msgstr "12.0.0" msgid "12.0.0-20" msgstr "12.0.0-20" msgid "13.0.0" msgstr "13.0.0" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "15.0.0" msgstr "15.0.0" msgid "15.0.1" msgstr "15.0.1" msgid "" "A new interoperable image import method, ``web-download`` is introduced." msgstr "" "新しい相互運用可能なイメージインポート方法、``web-download`` が導入されまし" "た。" msgid "" "A new interoperable image import method, ``web-download`` is introduced. " "This method allows an end user to import an image from a remote URL. The " "image data is retrieved from the URL and stored in the Glance backend. (In " "other words, this is a **copy-from** operation.)" msgstr "" "新しい相互運用可能なイメージインポート方法、``web-download`` が導入されまし" "た。この方法によって、エンドユーザがリモートの URL からイメージをインポートで" "きます。イメージデータは URL から取得され、Glance のバックエンドに保存されま" "す。 (言い換えると、 **copy-from** 操作です。)" msgid "" "A new policy, ``tasks_api_access`` has been introduced so that ordinary user " "credentials may be used by Glance to manage the tasks that accomplish the " "interoperable image import process without requiring that operators expose " "the Tasks API to end users." msgstr "" "新しいポリシー ``tasks_api_access`` が導入され、Tasks API をエンドユーザーに" "公開することなく、相互運用可能なイメージのインポートプロセスを行うタスクを管" "理するのに、一般的なユーザー認証情報を Glance が使用できます。" msgid "" "A new value for the Image 'visibility' field, 'community', is introduced." 
msgstr "" "イメージの 'visibility' フィールドに、新しい値 'community' が導入されました。" msgid "" "A new value for visibility, 'shared', is introduced. Images that have or " "can accept members will no longer be displayed as having 'private' " "visibility, reducing confusion among end users." msgstr "" "可視性の新しい値 'shared' が導入されました。所有、あるいはメンバーがアクセス" "できるイメージは、可視性 'private' としては表示されず、エンドユーザーとの間で" "の混乱を減少します。" msgid "" "A preview of zero-downtime database upgrades is available in this release, " "but it is **experimental** and **not supported for production systems**. " "Please consult the `Database Management`_ section of the Glance " "documentation for details." msgstr "" "このリリースでは、停止時間なしのデータベースアップグレードのプレビューが利用" "可能ですが、これは**実験的**で**本番環境ではサポートされません**。詳細は、" "Glance ドキュメンテーションの`データベース管理`_ セクションを参照してくださ" "い。" msgid "" "A recent change to oslo.log (>= 3.17.0) set the default value of ``[DEFAULT]/" "use_stderr`` to ``False`` in order to prevent duplication of logs (as " "reported in bug \\#1588051). Since this would change the current behaviour " "of certain glance commands (e.g., glance-replicator, glance-cache-manage, " "etc.), we chose to override the default value of ``use_stderr`` to ``True`` " "in those commands. We also chose not to override that value in any Glance " "service (e.g., glance-api, glance-registry) so that duplicate logs are not " "created by those services. Operators that have a usecase that relies on logs " "being reported on standard error may set ``[DEFAULT]/use_stderr = True`` in " "the appropriate service's configuration file upon deployment." msgstr "" "oslo.log (>= 3.17.0) の最近の変更では、ログの重複(bug \\#1588051)を防ぐため" "に、``[DEFAULT]/use_stderr`` のデフォルト値が ``False`` に設定されています。" "この変更が特定の\n" " glance コマンド(例えば、glance-replicator、glance-cache-manage、など)現在" "の動作を変更するので、これらのコマンドでは ``use_stderr`` のデフォルト値を " "``True`` で上書きしています。また、Glance サービス(例えば、glance-api、" "glance-registry)では、ログの重複は起こらないので、この値を上書きしていませ" "ん。標準エラーに出力されるログに依存するユースケースを持つオペレーターは、構" "成上の適切なサービスの設定ファイルに ``[DEFAULT]/use_stderr = True`` を設定し" "てください。" msgid "" "A return code of ``0`` means you are currently up to date with the latest " "migration script version and all ``db`` upgrades are complete." msgstr "" "返却コード ``0`` は、最新のマイグレーションスクリプトバージョンで更新され、す" "べての ``db`` アップグレードが完了したことを意味します。" msgid "" "A return code of ``3`` means that an upgrade from your current database " "version is available and your first step is to run ``glance-manage db " "expand``." msgstr "" "返却コード ``3`` は、現在のデータベースバージョンからのアップグレード可能であ" "ることを意味し、最初のステップとして ``glance-manage db expand`` を実行してく" "ださい。" msgid "" "A return code of ``4`` means that the expansion stage is complete, and the " "next step is to run ``glance-manage db migrate``." msgstr "" "返却コード ``4`` は、拡張ステージが完了したことを意味し、次のステップとして " "``glance-manage db migrate`` を実行してください。" msgid "" "A return code of ``5`` means that the expansion and data migration stages " "are complete, and the next step is to run ``glance-manage db contract``." msgstr "" "返却コード ``5`` は、拡張、およびデータ移行ステージが完了したことを意味し、次" "のステップとして ``glance-manage db contract`` を実行してください。" msgid "Accept the Range header in requests to serve partial images." msgstr "部分イメージを提供するリクエストで Range ヘッダーを受け付けます。" msgid "Add ``ploop`` to the list of supported disk formats." msgstr "サポートされるディスク形式の一覧に ``ploop`` を追加しました。" msgid "Add ``vhdx`` to list of supported disk format." msgstr "サポートされるディスク形式の一覧に ``vhdx`` を追加しました。" msgid "" "Added a new command ``glance-manage db check``, the command will allow a " "user to check the status of upgrades in the database." 
msgstr "" "新しいコマンド ``glance-manage db check`` を追加しました。このコマンドで、" "データベースのアップグレードの状態をチェックできます。" msgid "" "Added a plugin to inject image metadata properties to non-admin images " "created via the interoperable image import process." msgstr "" "相互運用可能なイメージのインポートプロセスを介して作成される非管理者イメージ" "に、イメージのメタデータプロパティを注入するプラグインを追加しました。" msgid "" "Added a plugin to inject image metadata properties to non-admin images " "created via the interoperable image import process. This plugin implements " "the spec `Inject metadata properties automatically to non-admin images`_. " "See the spec for a discussion of the use case addressed by this plugin." msgstr "" "相互運用可能なイメージのインポートプロセスを介して作成される非管理者イメージ" "に、イメージのメタデータプロパティを注入するプラグインを追加しました。このプ" "ラグインは、スペック `Inject metadata properties automatically to non-admin " "images`_ を実装しています。このプラグインが扱うユースケースの検討には、スペッ" "クを参照してください。" msgid "" "Added additional metadata for CPU thread pinning policies to 'compute-cpu-" "pinning.json'. Use the ``glance_manage`` tool to upgrade." msgstr "" "CPU スレッドピニングのポリシーに関する追加メタデータを 'compute-cpu-pinning." "json' に追加しました。アップグレードには、``glance_manage`` ツールを使用して" "ください。" msgid "" "Adding locations to a non-active or non-queued image is no longer allowed." msgstr "" "非アクティブ、あるいはキューに入っていないイメージへの場所の追加は許可されな" "くなりました。" msgid "" "Additional values were added to the enumeration for the `hw_disk_bus`_ " "property in the ``OS::Compute::LibvirtImage`` namespace." msgstr "" "``OS::Compute::LibvirtImage`` 名前空間の `hw_disk_bus`_ プロパティの列挙子に" "新しい値が追加されました。" msgid "" "Additionally, you will need to verify that the task-related policies in the " "Glance policy.json file are set correctly. These settings are described " "below." msgstr "" "さらに、Glance の policy.json ファイルにあるタスクに関連するポリシーが正しく" "設定されていることを確認する必要があります。これらの設定は、以下に記述されて" "います。" msgid "" "All ``qemu-img info`` calls are now run under resource limitations that " "limit the CPU time and address space usage of the process running the " "command to 2 seconds and 1 GB respectively. This addresses the bug https://" "bugs.launchpad.net/glance/+bug/1449062 Current usage of \"qemu-img\" is " "limited to Glance tasks, which by default (since the Mitaka release) are " "only available to admin users. We continue to recommend that tasks only be " "exposed to trusted users" msgstr "" "すべての ``qemu-img info`` 呼び出しは、コマンドを実行するプロセスの CPU 時間" "とアドレス空間使用量を、それぞれ2 秒および 1GB に制限されたリソース制限の下で" "実行されます。これは、バグ https://bugs.launchpad.net/glance/+bug/1449062 に" "対応しています。\"qemu-img\" の現在の使用は、Glance のタスクに限定されおり、" "(Mitaka リリース以降) デフォルトでは、管理者ユーザーのみが利用できます。タス" "クは信用できるユーザーのみに公開することを引き続き推奨します。" msgid "" "All ``qemu-img info`` calls will be run under resource limitations that " "limit the CPU time and address space usage of the process if oslo." "concurrency is at least version 2.6.1. ``qemu-img info`` calls are now " "limited to 2 seconds and 1 GB respectively. This addresses the bug https://" "bugs.launchpad.net/glance/+bug/1449062 Current usage of \"qemu-img\" is " "limited to Glance tasks. In the Mitaka release, tasks by default will only " "be available to admin users. In general, we recommend that tasks only be " "exposed to trusted users, even in releases prior to Mitaka." msgstr "" "すべての ``qemu-img info`` 呼び出しは、oslo.concurrency が 2.6.1 以降の場合、" "コマンドを実行するプロセスの CPU 時間とアドレス空間使用量を、それぞれ2 秒およ" "び 1GB に制限されたリソース制限の下で実行されます。これは、バグ https://bugs." 
"launchpad.net/glance/+bug/1449062 に対応しています。\"qemu-img\" の現在の使用" "は、Glance のタスクに限定されています。Mitaka リリースでは、デフォルトでは、" "管理者ユーザーのみが利用できます。Mitaka 以前のリリースにおいても、通常、タス" "クは信用できるユーザーのみに公開することを推奨します。" msgid "" "All images currently with 'public' visibility (that is, images for which " "'is_public' is True in the database) will have their visibility set to " "'public'." msgstr "" "可視性 'public' (データベース上で 'is_public' が True のイメージ) を持つすべ" "てのイメージは、可視性を 'public' に設定されます。" msgid "" "All the ``glance manage db`` commands are changed appropriately to use " "Alembic to perform operations such as ``version``, ``upgrade``, ``sync`` and " "``version_control``. Hence, the \"old-style\" migration scripts will no " "longer work with the Ocata glance manage db commands." msgstr "" "すべての ``glance manage db`` コマンドは、``version``、``upgrade``、" "``sync``、``version_control``などの操作を行うために、Alembicを使うよう適切に" "変更されました。したがって、旧式の移行スクリプトは、Ocata 版の glance manage " "db コマンドでは動作しません。" msgid "" "Although support has been added for Glance to be run as a WSGI application " "hosted by a web server, the atypical nature of the Images APIs provided by " "Glance, which enable transfer of copious amounts of image data, makes it " "difficult for this approach to work without careful configuration. Glance " "relies on the use of chunked transfer encoding for image uploads, and the " "support of chunked transfer encoding is not required by the `WSGI " "specification`_." msgstr "" "Glance をウェブサーバーにホストされる WSGI アプリケーションとして稼働させるサ" "ポートが追加されましたが、Glance から提供される Image API の非定型的な性質に" "より、大量のイメージデータを転送できるため、このアプローチは注意深く設定する" "ことなく動作させるのは困難です。Glance は、イメージアップロードをチャンク転送" "符号化の使用に依存しており、チャンク転送符号化のサポートは、`WSGI 仕様`_ には" "要求されていません。" msgid "" "An **EXPERIMENTAL** version of the Images API supplied by Glance is " "introduced as **2.6**. It includes the new API calls introduced for the " "`refactored image import`_ functionality. This functionality is **not** " "enabled by default, so the CURRENT version of the Images API remains at " "2.5. There are no changes to the version 2.5 API in this release, so all " "version 2.5 calls will work whether or not the new import functionality is " "enabled or not." msgstr "" "Glance によって提供される Image API の**実験的**バージョンは、**2.6** として" "紹介されています。これには、`refactored image import`_ 機能のために導入された" "新しい API 呼び出しが含まれています。この機能はデフォルトで有効になって**いま" "せん**ので、現行バージョンの Images API は 2.5 のままです。このリリースのバー" "ジョン 2.5 API には変更がないため、新しいインポート機能が有効かどうかに関わら" "ず、バージョン 2.5 のすべての呼び出しが機能します。" msgid "" "An enumeration of values was added for the `vmware:hw_version`_ property in " "the ``OS::Compute::VMwareFlavor`` namespace." msgstr "" "``OS::Compute::VMwareFlavor`` 名前空間の `vmware:hw_version`_ プロパティに列" "挙子が追加されました。" msgid "" "An image must have 'shared' visibility in order to accept members. This " "provides a safeguard from 'private' images being shared inadvertently." msgstr "" "メンバーを許諾するには、イメージは `shared` 可視性を持つ必要があります。これ" "は、'private' イメージが誤って共有されてしまうことから保護します。" msgid "" "An image that has 'community' visibility in the v2 API will have " "``is_public`` == False in the v1 API. It will behave like a private image, " "that is, only the owner (or an admin) will have access to the image, and " "only the owner (or an admin) will see the image in the image-list response." msgstr "" "v2 APIで 'community' 可視性を持つイメージは、v1 API で ``is_public`` == " "False になります。 プライベートイメージのように振る舞います。つまり、所有者" "(または管理者)だけがイメージにアクセスでき、所有者(または管理者)だけが " "image-list 応答でイメージを表示できます。" msgid "" "An image with 'community' visibility is available for consumption by any " "user." 
msgstr "'community' 可視性を持つイメージは、すべてのユーザーが利用できます。" msgid "" "As far as the Glance team can determine, the difficulties running Glance as " "a WSGI application are caused by issues external to Glance. Thus the Glance " "team recommends that Glance be run in its normal standalone configuration, " "particularly in production environments. If you choose to run Glance as a " "WSGI application in a web server, be sure to test your installation " "carefully with realistic usage scenarios." msgstr "" "Glance チームが判断する限り、Glance を WSGI アプリケーションとして稼働させる" "難しさは、Glance の外部の問題に起因します。よって、 Glance チームは、特に本番" "環境では、通常のスタンドアローンの設定で動作させることを推奨します。もし、" "Glance を WSGI アプリケーションとしてウェブサーバーで稼働することを選択した場" "合、現実的なユースケースシナリオとともに、注意深く、確実に構成をテストしてく" "ださい。" msgid "" "As is standard behavior for the image-list call, other filters may be " "applied to the request. For example, to see the community images supplied " "by user ``931efe8a-0ad7-4610-9116-c199f8807cda``, the following call would " "be made: ``GET v2/images?visibility=community&owner=931efe8a-0ad7-4610-9116-" "c199f8807cda``" msgstr "" "image-list の通常の動作として、リクエストに他のフィルターが適用される可能性が" "あります。例えば、``931efe8a-0ad7-4610-9116-c199f8807cda`` のユーザーから提供" "されたコミュニティのイメージを参照するために、次のようなリクエストが作成され" "ます: ``GET v2/images?visibility=community&owner=931efe8a-0ad7-4610-9116-" "c199f8807cda``" msgid "" "As mentioned above, the default visibility of an image is 'shared'. If a " "user wants an image to be private and not accept any members, a visibility " "of 'private' can be explicitly assigned at the time of creation." msgstr "" "前述のように、イメージのデフォルトの可視性は 'shared' です。ユーザーがイメー" "ジをプライベートにしてメンバーを受け入れたくない場合は、作成時に 'private' の" "可視性を明示的に割り当てることができます。" msgid "" "As mentioned above, the same recommendation applies to the policy-based " "configuration for exposing multiple image locations." msgstr "" "前述のように、複数のイメージの場所を公開する policy-based 設定にも同じ推奨が" "適用されます。" msgid "" "Attempting to set image locations to an image *not* in ``active`` or " "``queued`` status will now result in a HTTP Conflict (HTTP status code 409) " "to the user." msgstr "" "イメージの場所を ``active`` または ``queued`` ステータスでは*ない*イメージに" "設定しようとすると、HTTP Conflict (HTTP ステータスコード 409) がユーザに表示" "されます。" msgid "Bug 1229823_: Handle file delete races in image cache" msgstr "Bug 1229823_: イメージキャッシュのファイル削除競合を処理する" msgid "Bug 1482129_: Remove duplicate key from dictionary" msgstr "Bug 1482129_: 辞書から重複したキーを削除する" msgid "Bug 1483353 v1 Updates using x-image-meta-id header provoke E500 or 200" msgstr "" "Bug 1483353 x-image-meta-id ヘッダーを使用した更新が 500 エラーあるいは 200 " "を誘発する" msgid "Bug 1504184 Glance does not error gracefully on token validation error" msgstr "Bug 1504184 Glance はトークンの検証エラーで正常にエラーになりません" msgid "" "Bug 1505474 Glance raise 500 error when delete images with unallowed status " "change" msgstr "" "Bug 1505474 許可されていないステータス変更を伴うイメージ削除をすると、Glance " "が 500 エラーを発生させる" msgid "" "Bug 1505675 Flaky tasks test glance.tests.unit.v2.test_tasks_resource." "TestTasksController.test_create_with_live_time" msgstr "" "Bug 1505675 tasks のテスト glance.tests.unit.v2.test_tasks_resource." 
"TestTasksController.test_create_with_live_time の修正" msgid "Bug 1505710 Wrong logging setup in replicator" msgstr "Bug 1505710 レプリケーターのロギングセットアップの修正" msgid "" "Bug 1512369 glance should declare a test-requirements.txt on swiftclient " "(for config generator)" msgstr "" "Bug 1512369 glance は設定生成のために swiftclient の test-requirements.txt を" "定義すべき" msgid "Bug 1516706_: Prevent v1_api from making requests to v2_registry" msgstr "Bug 1516706_: v1_api から v2_registry へのリクエスト作成の防止" msgid "" "Bug 1517060 Users (without admin privileges) can change ACTIVE_IMMUTABLE " "properties of their own images when deactivated." msgstr "" "Bug 1517060 所有するイメージが無効にされた時、ユーザー(管理者権限なし)が " "ACTIVE_IMMUTABLE プロパティを変更できる件を修正" msgid "" "Bug 1522132 Scrubber tests are broken due to deprecated config " "filesystem_store_datadir under DEFAULT section" msgstr "" "Bug 1522132 廃止予定となった設定、DEFAULT セクションの " "filesystem_store_datadir によって、スクラバーテストが壊れている件を修正" msgid "Bug 1554412_: Provide user friendly message for FK failure" msgstr "" "Bug 1554412_: FK の失敗に対する、ユーザーフレンドリーなメッセージの提供" msgid "Bug 1655727_: Invoke monkey_patching early enough for eventlet 0.20.1" msgstr "Bug 1655727_: eventlet 0.20.1 に十分間に合う monkey_patching 呼び出し" msgid "Bug 1657459_: Fix incompatibilities with WebOb 1.7" msgstr "Bug 1657459_: WebOb 1.7 との非互換性の修正" msgid "Bug 1664709_: Do not serve partial image download requests from cache" msgstr "" "Bug 1664709_: キャッシュから部分イメージダウンロードリクエストを提供しない" msgid "Bug 1686488_: Fix glance image-download error" msgstr "Bug 1686488_: glance image-download エラーの修正" msgid "Bug 1701346_: Fix trust auth mechanism" msgstr "Bug 1701346_: 信頼認証メカニズムの修正" msgid "Bug Fixes" msgstr "バグ修正" msgid "" "Changes in Python 2.7 distribution packages affected Glance's use of " "eventlet. As a result, the team backported a fix from eventlet 0.22.0 to " "the Glance code. (The Ocata release of OpenStack uses eventlet 0.19.0.) " "See Bug 1747305_ for details." msgstr "" "Python 2.7 配布パッケージの変更により、Glance での eventlet の使用が影響を受" "けました。その結果、チームは eventlet 0.22.0から Glance のコードに修正をバッ" "クポートしました。OpenStack の Ocata リリースでは、eventlet 0.19.0 が使用され" "ています。詳細は、Bug 1747305_ を参照してください。" msgid "" "Changes in Python 2.7 distribution packages affected Glance's use of " "eventlet. As a result, the team backported a fix from eventlet 0.22.0 to " "the Glance code. (The Pike release of OpenStack uses eventlet 0.20.0.) See " "Bug 1747304_ for details." msgstr "" "Python 2.7 配布パッケージの変更により、Glance での eventlet の使用が影響を受" "けました。その結果、チームは eventlet 0.22.0から Glance のコードに修正をバッ" "クポートしました。OpenStack の Pike リリースでは、eventlet 0.20.0 が使用され" "ています。詳細は、Bug 1747304_ を参照してください。" msgid "" "Code for the OpenStack Artifacts Service (Glare) and its EXPERIMENTAL API " "has been `removed`_ from the Glance codebase." msgstr "" "OpenStack Artifacts サービス (Glare) のコードとその実験的 API は Glance の" "コードから`削除`_されました。" msgid "" "Code for the OpenStack Artifacts Service (`Glare`_) and its EXPERIMENTAL API " "has been removed from the Glance codebase, as it was relocated into an " "independent `Glare`_ project repository during a previous release cycle. The " "database upgrade for the Glance Pike release drops the Glare tables (named " "'artifacts' and 'artifact_*') from the Glance database." 
msgstr "" "OpenStack Artifacts サービス (`Glare`_) のコードとその実験的 API は Glance の" "コードから`削除`_され、前のリリースサイクルで独立の `Glare`_ プロジェクトのリ" "ポジトリーに再配置されました。Glance の Pike リリースのデータベースアップグ" "レードは、Glare テーブル('artifacts' および 'artifact_*')を Glance データ" "ベースから削除します。" msgid "Critical Issues" msgstr "致命的な問題" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "" "Currently, we are experiencing some problems in the gate when Glance is " "configured to run in devstack following the guidelines recommended in the " "documentation. You can follow `Bug 1703856`_ to learn more." msgstr "" "現在、ドキュメントで推奨されているガイドラインに従って Glance が devstack で" "実行されるように設定されている場合、ゲートでいくつかの問題が発生しています。" "`Bug 1703856`_ に従って詳細を知ることができます。" msgid "Database downgrades have been removed from the Glance source tree." msgstr "データベースダウングレードは、Glance のソースから削除されました。" msgid "" "Database versions are no longer numerical. Instead, they are the *revision " "ID* of the last migration applied on the database." msgstr "" "データベースバージョンは数値ではなくなりました。データベースに適用された最後" "のマイグレーションの*リビジョン ID* になります。" msgid "" "Deprecate the ``show_multiple_locations`` configuration option in favor of " "the existing Role Based Access Control (RBAC) for Image locations which uses " "``policy.json`` file to define the appropriate rules." msgstr "" "イメージの場所に、適切なロールを定義する ``policy.json`` ファイルを使用した、" "既存の ロールベースアクセス制御 (RBAC) を使用し、``show_multiple_locations`` " "設定オプションを廃止予定にしました。" msgid "" "Deprecated \"sign-the-hash\" approach for image signing. Old run_tests and " "related scripts have been removed." msgstr "" "イメージの署名の \"sign-the-hash\" アプローチを廃止予定にしました。古い " "run_tests と関連するスクリプトが削除されました。" msgid "" "Deprecated values are no longer recognized for the configuration option " "``store_type_preference``. The two non-standard values 'filesystem' and " "'vmware_datastore' were DEPRECATED in Newton and are no longer operable. The " "correct values for those stores are 'file' and 'vmware'. See the Newton " "release notes for more information at https://docs.openstack.org/" "releasenotes/glance/newton.html#upgrade-notes" msgstr "" "廃止予定の値は、設定オプション ``store_type_preference`` では認識されなくなり" "ました。二つの標準でない値 'filesystem' と 'vmware_datastore' は Newton で廃" "止予定となり、もはや動作不能です。それらのストアの正しい値は 'file' と " "'vmware' です。 詳細については、Newton のリリースノート(https://docs." "openstack.org/releasenotes/glance/newton.html#upgrade-notes)を参照してくださ" "い。" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "" "Expired tasks are now deleted in Glance. As with other Glance resources, " "this is a \"soft\" deletion, that is, a deleted task is marked as " "``deleted`` in the database so that the task will not appear in API " "responses, but the information associated with the task persists in the " "database." msgstr "" "期限切れのタスクは Glance で削除されるようになりました。他の Glance リソース" "と同様、これは \"ソフト\"削除です。つまり、削除されたタスクはデータベース内" "で ``deleted`` とマークされ、タスクは API レスポンスには表示されませんが、タ" "スクに関連する情報はデータベースに格納されています。" msgid "Expired tasks are now deleted." msgstr "期限切れのタスクは削除されました。" msgid "" "Finally, there are no changes to the version 2.5 API in this release. All " "version 2.5 calls will work whether the new import functionality is enabled " "or not." msgstr "" "結果的に、このリリースでは、バージョン 2.5 API から変更はありません。新しいイ" "ンポート機能が有効かどうかに関わらず、すべてのバージョン 2.5 API 呼び出しが機" "能します。" msgid "" "Fixing bug 1525915; image might be transitioning from active to queued by " "regular user by removing last location of image (or replacing locations with " "empty list). This allows user to re-upload data to the image breaking " "Glance's promise of image data immutability. 
From now on, last location " "cannot be removed and locations cannot be replaced with empty list." msgstr "" "bug 1525915 を修正しました。一般ユーザーによるイメージの最後の場所の削除(ま" "たは空のリストでの置換)により、イメージをアクティブからキューに入っている状" "態になることがあります。これにより、ユーザーはデータを再アップロードすること" "ができ、Glance のイメージデータの不変性を破壊します。これ以降、最後の場所を削" "除することはできず、場所を空のリストに置き換えることはできません。" msgid "For example, ``GET v2/images?visibility=community``" msgstr "例えば、 ``GET v2/images?visibility=community``" msgid "" "For example, a user is not allowed to add a location to an image in " "``saving`` status. Suppose a user decides to add a location anyway. It is " "possible that before the user's request is processed, the transmission of " "data being saved is completed and the image transitioned into ``active`` " "status, in which case the user's add location request will succeed. To the " "user, however, this success will appear anomalous because in most cases, an " "attempt to add a location to an image in ``saving`` status will fail." msgstr "" "例えば、ユーザは、``saving`` 状態にあるイメージに場所を追加することはできませ" "ん。とにかくユーザーが場所を追加することを決めたとします。ユーザーのリクエス" "トが処理される前に、保存されているデータの送信が完了し、イメージが " "``active`` 状態に移行する可能性があり、その場合、ユーザーの場所の追加のリクエ" "ストは成功します。しかしながら、ほとんどの場合 ``saving`` 状態のイメージに場" "所を追加しようとする試みは失敗するため、この成功は変則的に見えます。" msgid "" "For example, configuration options specifying counts, where a negative value " "was undefined, would have still accepted the supplied negative value. Such " "options will no longer accept negative values." msgstr "" "例えば、負の値が未定義のカウントを指定する設定オプションは、与えられた負の値" "を受け付けました。このようなオプションは負の値を受け付けなくなります。" msgid "" "For example, the Liberty migration, which was version ``42`` under the old " "system, will now appear as ``liberty``. The Mitaka migrations ``43`` and " "``44`` appear as ``mitaka01`` and ``mitaka02``, respectively." msgstr "" "例えば、古いシステムでバージョン ``42`` のLiberty のマイグレーションは、" "``liberty`` として表示されます。``43`` および ``44`` の Mitaka のマイグレー" "ションは、それぞれ ``mitaka01`` および ``mitaka02`` として表示されます。" msgid "" "For more information, see the Glance specification document `Actually " "Deprecate the Glance Registry `_." msgstr "" "詳細は、Glance specification document `Actually Deprecate the Glance " "Registry `_ を参照してください。" msgid "" "For the Newton release, this option will still be honored. However, it is " "important to update ``policy.json`` file for glance-api nodes. In " "particular, please consider updating the policies ``delete_image_location``, " "``get_image_location`` and ``set_image_location`` as per your requirements. " "As this is an advanced option and prone to expose some risks, please check " "the policies to ensure security and privacy of your cloud." msgstr "" "Newton リリースでは、このオプションは引き続き適用されます。しかし、glance-" "api ノードの ``policy.json`` ファイルの更新は重要です。特に、ポリシー " "``delete_image_location``、``get_image_location`` および " "``set_image_location`` を要件に従って更新することを検討してください。これは高" "度なオプションであり、いくつかのリスクを露呈しやすいため、クラウドのセキュリ" "ティとプライバシーを確保するためのポリシーを確認してください。" msgid "" "For the Pike release, the legacy Glare code has been removed from the Glance " "code repository and the legacy 'artifacts' and 'artifact_*' database tables " "are dropped from the Glance database. As the Artifacts service API was an " "EXPERIMENTAL API in Glance and has not used the Glance database since " "Mitaka, no provision is made for migrating data from the Glance database to " "the Glare database." 
msgstr "" "Pike リリースでは、従来の Glare のコードが Glance レポジトリーから削除され、" "従来の\n" " 'artifacts' と 'artifact_*' データベーステーブルが Glance データベースから削" "除されました。Artifacts サービス API が Glance の実験的 API であり、Mitaka 以" "降 Glance データベースで使用されていなかったため、Glance データベースから " "Glare データベースへの移行に対する対処はありません。" msgid "" "Formerly, it was possible to add members to an image whose visibility was " "``private``, thereby creating a \"shared\" image. In this release, an image " "must have a visibility of ``shared`` in order to accept member operations. " "Attempting to add a member to an image with a visibility of ``private`` will " "result in a `4xx response`_ containing an informative message." msgstr "" "以前は、``private`` 可視性のイメージにメンバーを追加でき、それによって「共" "有」イメージを作成できました。このリリースでは、メンバーの操作を受け付けるに" "は、イメージは ``shared`` 可視性を持つ必要があります。``private`` 可視性を持" "つイメージにメンバーを追加しようとすると、有用なメッセージを含む `4xx " "response`_ が返されます。" msgid "" "Future releases will ignore this option and just follow the policy rules. It " "is recommended that this option is disabled for public endpoints and is used " "only internally for service-to-service communication." msgstr "" "将来のリリースでは、このオプションは無視され、ポリシールールのみに従います。" "公開するエンドポイントではこのオプションを無効にし、内部の service-to-" "service 通信のみに使用することを推奨します。" msgid "Glance API **CURRENT** ``minor`` version is now ``2.4``." msgstr "Glance API の **現在の** ``minor`` バージョンは ``2.4`` です。" msgid "Glance API ``minor`` version bumped to 2.4." msgstr "Glance API の ``minor`` バージョンは 2.4 に引き上げられました。" msgid "Glance Release Notes" msgstr "Glance リリースノート" msgid "" "Glance and Nova contain nearly identical digital signature modules. In order " "to better maintain and evolve this code and to eliminate the possibility " "that the modules diverge, we have replaced the digital signature module in " "Glance with the new ``cursive`` library." msgstr "" "Glance と Nova には、ほぼ同じ電子署名モジュールが含まれています。このコードを" "よりよく維持し発展させ、モジュールが発散する可能性を排除するために、Glance の" "電子署名モジュールを新しい ``cursive`` ライブラリに置き換えました。" msgid "" "Glance had been accepting the Content-Range header for GET v2/images/" "{image_id}/file requests, contrary to RFC 7233. Following RFC 7233, Glance " "will now:" msgstr "" "Glance は、RFC 7233 に反して GET v2/images/{image_id}/file リクエストで " "Content-Range ヘッダーを受け付けていました。Glance は RFC 7233 に追随するよう" "になり:" msgid "" "Glance is now packaged with a WSGI script entrypoint, enabling it to be run " "as a WSGI application hosted by a performant web server. See `Running " "Glance in HTTPD `_ in the Glance documentation for details." msgstr "" "Glance は、WSGI スクリプトエントリーポイントとともにパッケージされ、ウェブ" "サーバーにホストされる WSGI アプリケーションとして動作できるようになりまし" "た。詳細は\n" " Glance ドキュメンテーションの `Running Glance in HTTPD `_ を参照してください" msgid "" "Glance no longer returns a 500 when 4 byte unicode characters are passed to " "the metadefs API." msgstr "" "Glance は、4 バイトの unicode 文字列が metadef API に渡された時に、 500 エ" "ラーを返さないようになりました。" msgid "" "Glance now uses the `python 'cryptography' module`_ instead of the " "'pycrypto' module." msgstr "" "Glance は、'pycrypto' モジュールの代わりに `python 'cryptography' モジュール" "`_ を使用するようになりました。" msgid "" "Glance uses the ``cursive`` library's functionality to verify digital " "signatures. To familiarize yourself with this new dependency and see the " "list of transitive dependencies visit http://git.openstack.org/cgit/" "openstack/cursive" msgstr "" "Glance は ``cursive`` ライブラリーの機能をデジタル署名の検証に使用します。こ" "の新しい依存関係に慣れ、依存関係のリストを見るには、 http://git.openstack." 
"org/cgit/openstack/cursive を参照してください。" msgid "" "HTTP Response Code 409 (Conflict) will be returned in response to an attempt " "to remove an image location when the image status is not ``active``" msgstr "" "イメージの状態が ``active`` にない場合、イメージの場所の置き換えをしようとす" "ると、HTTP Response Code 409 (Conflict) が返されます。" msgid "" "HTTP Response Code 409 (Conflict) will be returned in response to an attempt " "to replace an image location when the image status is not ``active`` or " "``queued``" msgstr "" "イメージの状態が ``active`` あるいは ``queued`` にない場合、イメージの場所の" "置き換えをしようとすると、HTTP Response Code 409 (Conflict) が返されます。" msgid "" "Here is a list of other important bugs that have been fixed (or partially " "fixed) along with their descriptions." msgstr "" "フィックス(あるいは一部フィックス)された、その他の重要なバグの一覧です。" msgid "Here is a list of possible return codes:" msgstr "戻りコードの一覧です:" msgid "" "If ``enable_image_import`` is set **False**, requests to the v2 endpoint for " "URIs defined only in v2.6 will return 404 (Not Found) with a message in the " "response body stating \"Image import is not supported at this site.\" " "Additionally, the image-create response will not contain the \"OpenStack-" "image-import-methods\" header." msgstr "" "``enable_image_import`` が **False** に設定されている場合、2.6 のみが定義され" "た v2 エンドポイントへのリクエストは、\"Image import is not supported at " "this site.\" のレスポンス本文を持つ 404 (Not Found) を返します。また、image-" "create の応答は、\"OpenStack-image-import-methods\" ヘッダーを含みません。" msgid "" "If an image has a visiblity of 'private' when viewed in the v2 API, then " "that image will not accept members in the v1 API. If a user wants to share " "such an image, the user can:" msgstr "" "v2 API で参照するときに、イメージが 'private' の可視性を持つ場合、v1 API では" "そのイメージはメンバーを受け入れません。ユーザーがそのようなイメージを共有し" "たい場合は、下記のようにすることができます:" msgid "" "If configured to work in daemon mode, the Scrubber will log an error message " "at level critical, but will not exit the process." msgstr "" "デーモンモードで動作するように設定した場合、Scrubber はクリティカルレベルでエ" "ラーメッセージを出力しますが、プロセスを終了しません。" msgid "" "If configured to work in non-daemon mode, the Scrubber will log an error " "message at level critical and exit with status one." msgstr "" "非デーモンモードで動作するように設定した場合、Scrubber はクリティカルレベルで" "エラーメッセージを出力し、ステータス 1 で終了します。" msgid "" "If you wish to enable the EXPERIMENTAL version 2.6 API that contains the new " "interoperable image import functionality, set the configuration option " "``enable_image_import`` to True in the glance-api.conf file. The default " "value for this option is False." msgstr "" "新しい相互運用可能なイメージのインポート機能を含む、実験的なバージョン 2.6 " "の API を有効にしたい場合、glance-api.conf ファイルで設定オプションの " "``enable_image_import`` を True に設定してください。このオプションのデフォル" "ト値は False です。" msgid "Image 'visibility' changes." msgstr "イメージの「可視性」を変更しました。" msgid "" "Image location updates to an image which is not in ``active`` or ``queued`` " "status can introduce race conditions and security issues and hence a bad " "experience for users and operators. As a result, we have restricted image " "location updates in this release. Users will now observe the following:" msgstr "" "``active`` または ``queued`` ステータスではないイメージの場所の変更は、競合状" "態やセキュリティ問題、ユーザーやオペレーターに対する不利益をもたらす可能性が" "あります。よって、イメージの場所の更新は、このリリースで制限されます。ユー" "ザーは以下をのことに遭遇するかもしれません:" msgid "Image visibility is changed using the image update (PATCH) call." msgstr "" "イメージの可視性は、イメージ変更 (PATCH) 呼び出しを使用して変更されます。" msgid "Image visibility may be specified at the time of image creation." 
msgstr "イメージの可視性は、イメージ作成の時に指定できます。" msgid "" "Images currently with 'private' visibility (that is, images for which " "'is_public' is False in the database) **and** that have image members, will " "have their visibility set to 'shared'." msgstr "" "可視性 'private' (データベース上で 'is_public' が False のイメージ) を持つ、" "**かつ**イメージのメンバーを持つ、すべてのイメージは、可視性を 'shared' に設" "定されます。" msgid "" "Impact of the Ocata visibility changes on end users of the Images API v2" msgstr "Ocata の可視性の変更による、Images API v2 のエンドユーザーへの影響" msgid "Impact of the Ocata visibility changes on the Images API v1" msgstr "Ocata の可視性の変更による、Images API v1 への影響" msgid "" "Implement the ability to filter images by the properties `id`, `name`, " "`status`,`container_format`, `disk_format` using the 'in' operator between " "the values. Following the pattern of existing filters, new filters are " "specified as query parameters using the field to filter as the key and the " "filter criteria as the value in the parameter. Filtering based on the " "principle of full compliance with the template, for example 'name = in:deb' " "does not match 'debian'. Changes apply exclusively to the API v2 Image " "entity listings An example of an acceptance criteria using the 'in' operator " "for name ?name=in:name1,name2,name3. These filters were added using syntax " "that conforms to the latest guidelines from the OpenStack API Working Group." msgstr "" "値に 'in' 演算子を使った、`id`、`name`、`status`、`container_format`、" "`disk_format` プロパティによるイメージのフィルタリング機能を実装しました。既" "存のフィルターのパターンに従い、新しいフィルターをクエリーパラメーターとして" "指定します。このパラメーターは、キーとしてフィルタリングするフィールドを使用" "し、パラメーターの値としてフィルター条件を使用します。テンプレートの完全な遵" "守の原則に基づくフィルタリングでは、例えば、 'name = in:deb' は 'debian' と一" "致しません。変更は API v2 Image エンティティリストにのみ適用されます。name " "に 'in' 演算子を使用する受け入れ可能な例、?name=in:name1,name2,name3 。これら" "のフィルターは、OpenStack API ワーキンググループの最新のガイドラインに準拠し" "た構文を使用して追加されました。" msgid "" "Implemented re-authentication with trusts when updating image status in " "registry after image upload. When long-running image upload takes some a lot " "of time (more than token expiration time) glance uses trusts to receive new " "token and update image status in registry. It allows users to upload big " "size images without increasing token expiration time." msgstr "" "イメージのアップロード後にレジストリ内のイメージステータスを更新するときに、" "信頼での再認証を実装しました。長時間実行されるイメージのアップロードには、" "トークンの有効期限よりも長い時間がかかることがありますが、glance は新しいトー" "クンを受け取り、レジストリ内のイメージの状態を更新するために信頼を使用しま" "す。トークンの有効期限を延長することなく、大きなサイズのイメージをアップロー" "ドすることができます。" msgid "Improved configuration option descriptions and handling." msgstr "設定オプションの説明と処理が改善されました。" msgid "" "In Newton, the majority of the signature verification code was removed from " "Glance. ``cursive`` has been added to Glance as a dependency and will be " "installed by default." msgstr "" "Newtonでは、署名検証コードの大部分が Glance から削除されました。``cursive`` " "は Glance に依存関係として追加され、デフォルトでインストールされます。" msgid "" "In accord with current OpenStack policy, Glance log messages are `no longer " "translated`_." msgstr "" "現在の OpenStack ポリシーに従い、Glance のログメッセージは、`もう翻訳されてい" "ません `_。" msgid "" "In order to check the current state of your database upgrades, you may run " "the command ``glance-manage db check``. This will inform you of any " "outstanding actions you have left to take." msgstr "" "データベースのアップグレードの現在の状態を確認するには、``glance-db check`` " "コマンドを実行してください。これにより、残っている未解決のアクションが通知さ" "れます。" msgid "" "In order to preserve backward compatibilty with the current sharing " "workflow, the default visibility of an image in Ocata is 'shared'. 
" "Consistent with pre-Ocata behavior, this will allow the image to accept " "member operations without first updating the visibility of the image. (Keep " "in mind that an image with visibility 'shared' but having no members is not " "actually accessible to anyone other than the image owner, so this is not in " "itself a security problem.)" msgstr "" "現在の共有ワークフローとの後方互換性を維持するために、Ocata のイメージのデ" "フォルトの可視性は「shared」になっています。Ocata 以前の動作と一貫して、イ" "メージの可視性を最初に更新することなく、イメージのメンバー操作を受け入れるこ" "とができます。 (「shared」の可視性を持つがメンバーを持たないイメージは、イ" "メージ所有者以外の誰も実際にアクセスできないため、セキュリティ上の問題はあり" "ません。)" msgid "" "In order to prevent users spamming other users' image-list response, " "community images are not included in the image-list response unless " "specifically requested by a user." msgstr "" "ユーザーが他のユーザーの image-list 応答をスパムすることを防止するために、コ" "ミュニティイメージは、ユーザによって特に要求されない限り、image-list 応答に含" "まれません。" msgid "" "In the Newton release, the Glare code was relocated into its own `Glare`_ " "project repository. Also in the Newton release, Glare ran an EXPERIMENTAL " "Artifacts API versioned as ``v1.0`` on its own endpoint and used its own " "database." msgstr "" "Newton リリースでは、Glare コードが独自の `Glare`_ プロジェクトリポジトリに再" "配置されました。また、Newton リリースでは、Glare は独自のエンドポイントで " "``v1.0`` としてバージョン付けされた実験的な Artifacts API を実行し、独自の" "データベースを使用しています。" msgid "" "In the v1 API, images have an ``is_public`` field (but no ``visibility`` " "field). Images for which ``is_public`` is True are the equivalent of images " "with 'public' visibility in the v2 API. Images for which ``is_public`` is " "false are the equivalent of v2 'shared' images if they have members, or the " "equivalent of v2 'private' images if they have no members." msgstr "" "v1 APIでは、イメージは ``is_public`` フィールドを持ちます(ただし、" "``visibility`` フィールドはありません)。``is_public`` が True のイメージは、" "v2 API の 'public' 可視性を持つイメージと同等です。``is_public`` が False の" "画像は、メンバーがある場合は v2 の 'shared' イメージに相当し、メンバーがない" "場合は v2 の 'private' イメージに相当します。" msgid "" "Include a ``Content-Range`` header upon successful delivery of the requested " "partial content." msgstr "" "要求された部分コンテンツの配信が成功すると、``Content-Range`` ヘッダーを含め" "ます。" msgid "" "It must be noted that the configuration options that take integer values now " "have a strict range defined with ``min`` and/or ``max`` boundaries where " "appropriate." msgstr "" "整数値をとる設定オプションは、必要に応じて、``min`` や ``max`` で定義された厳" "密な範囲を持つようになったことに注意しなければなりません。" msgid "Known Issues" msgstr "既知の問題" msgid "" "Latest release of ``glance_store`` library (used in the **Newton** release " "of Glance) will include fix for the ``glance_store`` bug 1619487." msgstr "" "``glance_store`` ライブラリ(Glance の ** Newton** リリースで使用されていま" "す)の最新リリースには、``glance_store`` のバグ 1619487 の修正が含まれていま" "す。" msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" msgid "" "Location updates for images are now restricted to images in ``active`` or " "``queued`` status. Please refer to the \"Bug Fixes\" section for more " "information." msgstr "" "イメージの場所の更新は、``active`` または ``queued`` ステータスのイメージに限" "定されます。詳細については、「バグ修正」セクションを参照してください。" msgid "" "Maintaining two different ways to configure, enable and/or disable a feature " "is painful for developers and operators, so the less granular means of " "controlling this feature will be eliminated in the **Ocata** release." 
msgstr "" "機能の有効化と無効化を設定する 2 つの異なる方法を維持することは、開発者やオペ" "レーターにとっては苦痛であるため、**Ocata** リリースではこの機能を制御するた" "めの細かい手段が省かれます。" msgid "" "Metadata definitions previously associated with OS::Nova::Instance have been " "changed to be associated with OS::Nova::Server in order to align with Heat " "and Searchlight." msgstr "" "以前は OS::Nova::Instance に関連付けられていたメタデータ定義は、Heat と " "Searchlight に合わせるために OS::Nova::Server に関連付けるよう変更されまし" "た。" msgid "" "Metadata definitions previously associated with OS::Nova::Instance have been " "changed to be associated with OS::Nova::Server in order to align with Heat " "and Searchlight. You may either upgrade them using glance-manage db " "load_metadefs [path] [merge] [prefer_new] or glance-manage db upgrade 44." msgstr "" "以前は OS::Nova::Instance に関連付けられていたメタデータ定義は、Heat と " "Searchlight に合わせるために OS::Nova::Server に関連付けるよう変更されまし" "た。glance-manage db load_metadefs [path] [merge] [prefer_new] または glance-" "manage db upgrade 44 を使用してアップグレードできます。" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "New Features" msgstr "新機能" msgid "Newton Series Release Notes" msgstr "Newton バージョンのリリースノート" msgid "" "Note that in either case, when dealing with an image that has 'private' " "visibility in the v2 API, there is a safeguard against a user " "unintentionally adding a member to an image and exposing data. The " "safeguard is that you must perform an additional image update operation in " "either the v1 or v2 API before you can expose it to other users." msgstr "" "v2 API で 'private' 可視性を持つイメージを扱う場合、ユーザーが意図せずにイ" "メージにメンバーを追加してデータを公開することに対する安全策があることに注意" "してください。 安全策は、他のユーザーに公開する前に、v1 または v2 API のどち" "らでも、イメージ更新操作を追加で実行する必要があることです。" msgid "" "Note that such images will have to have their visibility updated to 'shared' " "before they will accept members." msgstr "" "そのようなイメージは、メンバーを受け入れる前に、その可視性が 'shared' に更新" "されなければならないことに注意してください。" msgid "" "Note that the plugin applies *only* to images imported via the " "`interoperable image import process`_. Thus images whose data is set using " "the `image data upload`_ call will *not* be processed by the plugin and " "hence will not have properties injected. You can force end users to use the " "interoperable image import process by restricting the data upload call, " "which is governed by the ``upload_image`` policy in the Glance ``policy." "json`` file. See the documentation for more information." msgstr "" "プラグインは、`相互運用可能なイメージのインポートプロセス`_によってインポート" "されたイメージに*のみ*適用することに注意してください。したがって、`image " "data upload`_ 呼び出しを使用してデータが設定されたイメージは、プラグインで処" "理され*ない*ため、プロパティが注入されません。Glance の ``policy.json`` ファ" "イルの ``upload_image`` ポリシーによって管理されている data upload 呼び出し制" "限により、エンドユーザーに相互運用可能なイメージインポートプロセスを使用させ" "ることができます。詳細は、ドキュメントを参照してください。" msgid "" "Note that there are race conditions associated with adding a location to an " "image in the ``active``, ``queued``, ``saving``, or ``deactivated`` status. " "Because these are non-terminal image statuses, it is possible that when a " "user attempts to add a location, a status transition could occur that might " "block the **add** (or might appear to allow an add that should not be " "allowed)." msgstr "" "``active``、``queued``、``saving``、または ``deactivated`` の状態でイメージに" "場所を追加することに関連する競合状態があることに注意してください。これらは端" "末でのイメージの状態ではないため、ユーザーが場所を追加しようとしたときに、**" "追加**(あるいは、許可されていない追加を許可しているように見えるかもしれませ" "ん) をブロックする可能性のある状態遷移が発生するかもしれません。" msgid "Note the code name for the \"ceph\" driver is ``rbd``." 
msgstr "" "\"ceph\" ドライバーのコードネームが ``rbd`` であることに注意してください。" msgid "Note the ordering of the options within a store is not alphabetical." msgstr "" "登録されているオプションの順序がアルファベット順でないことに注意してくださ" "い。" msgid "Note: This is not a change. It's simply mentioned for completeness." msgstr "" "注: これは変更ではありません。完全性のために単純に別記しているだけです。" msgid "" "OSprofiler support requires passing of trace information between various " "OpenStack services. This information is signed by one of HMAC keys, which we " "historically defined in glance-api-paste.ini and glance-registry-paste.ini " "files (together with enabled option, that in fact was duplicated in the " "corresponding configuration files). OSprofiler 0.3.1 and higher supports " "passing this information via configuration files, therefore it's recommended " "to modify the ``[filter:osprofiler]`` section in \\*-paste.ini to look like " "``paste.filter_factor = osprofiler.web:WsgiMiddleware.factory`` and set the " "``hmac_keys`` option in the glance-\\*.conf files." msgstr "" "OSprofiler のサポートでは、さまざまな OpenStack サービス間でトレース情報を渡" "す必要があります。この情報は、歴史的に glance-api-paste.ini ファイルと " "glance-registry-paste.ini ファイルで定義されている HMAC キーの 1 つによって署" "名されています(実際には対応する設定ファイルに複製されています)。OSprofiler " "0.3.1 以降ではこの情報を設定ファイルで渡すことができますので、\\*-paste.ini " "の ``[filter:osprofiler]`` セクションを、``paste.filter_factor = osprofiler." "web:WsgiMiddleware.factory`` のように変更し、glance-\\*.conf ファイルに " "``hmac_keys`` オプションを設定することをお勧めします。" msgid "Ocata Series Release Notes" msgstr "Ocata バージョンのリリースノート" msgid "On this release requirements.txt were synced from global-requirements." msgstr "" "このリリースでは、requirements.txt が global-requirements から同期されまし" "た。" msgid "" "OpenStack deployments, packagers, and deployment projects which provided " "Glare should have begun to consume Glare from its own `Glare`_ respository " "during the Newton and Ocata releases. With the Pike release, it is no " "longer possible to consume Glare code from the Glance repository." msgstr "" "Glare を提供する OpenStack のデプロイメント、パッケージャー、およびデプロイメ" "ントプロジェクトは、Newton と Ocata のリリースの間に、Glare が 自身の " "`Glare`_ リポジトリから Glare を使い始めるはずでした。Pike リリースでは " "Glance リポジトリから Glare コードを使用することはもはや不可能です。" msgid "" "Options where a negative value was previously defined (for example, -1 to " "mean unlimited) will remain unaffected by this change." msgstr "" "以前に負の値が定義されていたオプション(たとえば、無制限を意味する -1)は、こ" "の変更の影響を受けません。" msgid "Other Notes" msgstr "その他の注意点" msgid "Pike Series Release Notes" msgstr "Pike バージョンのリリースノート" msgid "" "Please note a change in the Scrubber's behavior in case of job fetching " "errors:" msgstr "ジョブフェッチエラーの場合のスクラバーの動作の変更に注意してください: " msgid "" "Please note that not all Glance storage backends support partial downloads. " "A Range request to a Glance server with such a backend will result in the " "entire image content being delivered despite the 206 response code." msgstr "" "すべての Glance ストレージバックエンドが部分的なダウンロードをサポートするわ" "けではないことに注意してください。そのようなバックエンドを持つ Glanceサーバー" "へのRange リクエストは、206 レスポンスコードにもかかわらず、イメージコンテン" "ツ全体が配信される結果になります。" msgid "Prelude" msgstr "紹介" msgid "" "Prior to Ocata, an image with 'private' visibility could become shared by " "adding members to it, though its visibility remained 'private'. 
In order to " "make the visibility of images more clear, in Ocata the following changes are " "introduced:" msgstr "" "Ocata 以前は、'private' 可視性を持つイメージは、その可視性が 'private' であっ" "たにもかかわらず、それにメンバーを追加することによって共有することができまし" "た。Ocata では、イメージの可視性をより明確にするために、以下の変更が導入され" "ています:" msgid "" "Prior to Ocata, the Glance database did not have a 'visibility' column, but " "instead used a boolean 'is_public' column, which was translated into " "'public' or 'private' visibility in the Images API v2 image response. As " "part of the upgrade to Ocata, a 'visibility' column is introduced into the " "images table. It will be populated as follows" msgstr "" "Ocata 以前は、Glance データベースには 'visibility' の列はありませんでしたが、" "代わりに ブール値の 'is_public' 列が使用されていました。この列は Images API " "v2 のイメージレスポンスで 'public' または 'private' 可視性に変換されていまし" "た。Ocata へのアップグレードの一環として、'visibility' 列が images テーブルに" "導入されています。これには、次のように入力されます" msgid "Queens Series Release Notes" msgstr "Queens バージョンのリリースノート" msgid "" "Sample configuration file shipped with Glance source now has reordered store " "drivers configuration options for future consistent ordering." msgstr "" "Glance のソースコードに同梱されている設定ファイルのサンプルには、将来的な一貫" "した順番のためにストアードライバの設定オプションが並べ替えられました。" msgid "Security Issues" msgstr "セキュリティー上の問題" msgid "" "See the documentation in the sample glance-api.conf file for more " "information." msgstr "詳細については、glance-api.conf ファイルのサンプルを参照してください。" msgid "" "Several `new values`_ were added for the ``vmware_ostype`` property in the " "``OS::Compute::VMware`` namespace." msgstr "" "``OS::Compute::VMware`` 名前空間の ``vmware_ostype`` プロパティにいくつかの`" "新しい値`が追加されました。" msgid "" "Since the default value for 'visibility' upon image creation is 'shared', an " "image freshly created using the v1 API can have members added to it, just as " "it did pre-Ocata." msgstr "" "イメージ作成時の「可視性」のデフォルト値は 'shared' なので、v1 API を使用して" "新しく作成されたイメージは、Ocata 以前と同じように、メンバーを追加できます。" msgid "" "Some additional points about ``show_multiple_locations`` configuration " "option deprecation." msgstr "" "``show_multiple_locations`` の設定オプションの廃止予定に関するいくつかの追加" "ポイント。" msgid "" "Some backend store names were inconsistent between glance and glance_store. " "This meant that operators of the VMware datastore or file system store were " "required to use store names in ``glance-api.conf`` that did not correspond " "to any valid identifier in glance_store. As this situation encouraged " "misconfiguration and operator unhappiness, we have made the store names " "consistent in the Newton release. What this means for you:" msgstr "" "いくつかのバックエンドストアー名は glance と glance_store の間で矛盾していま" "した。これは、VMware データストアー、あるいはファイルシステムストアーのオペ" "レーターが、glance_store で有効な識別子に対応していないストアー名を ``glance-" "api.conf`` に使用する必要があることを意味していました。このような状況が誤った" "設定とオペレータの不幸を助長したため、Newton リリースではストアー名が一貫する" "ようにしました。これの意味するところは:" msgid "" "Some operators have reported issues with reordering observed in the sample " "configurations shipped with Glance release tarballs. This reordering may " "result into a incorrect \"diff\" of the configurations used downstream vs. " "newly introduced upstream." msgstr "" "一部のオペレータは、Glance のリリース tarballs に同梱されているサンプル設定に" "見られる並べ替えの問題を報告しています。この並べ替えによって、ダウンストリー" "ムで使用されていた設定と新しく導入されたアップストリームの設定との間で、誤っ" "た「差分」を生じる可能性があります。" msgid "Start using reno to manage release notes." msgstr "リリースノートの管理に reno を使い始めました。" msgid "" "Such an image will require its visibility to be updated to 'shared' before " "it will accept members." 
msgstr "" "そのようなイメージは、メンバーを受け入れる前に、その可視性を 'shared' に更新" "する必要があります。" msgid "" "The 'visibility' enumeration has been increased from two values (``public``, " "``private``) to four values (``public``, ``private``, ``shared``, and " "``community``)." msgstr "" "'visibility' 列挙子は、2 つの値 (``public`` と ``private``) から 4 つの値 " "(``public``、``private``、``shared``、および ``community``) に増加しました。" msgid "" "The **CURRENT** version of the Images API supplied by Glance is introduced " "as **2.6**. It includes the new API calls introduced on an experimental " "basis in the Pike release." msgstr "" "Glance によって提供される Images API の**現行**バージョンは ** 2.6 ** として" "導入されました。これは、Pike リリースで実験的に導入された新しい API 呼び出し" "を含みます。" msgid "" "The **CURRENT** version of the version 2 Images API supplied by Glance is " "now **2.5**. Changes include:" msgstr "" "Glance によって提供されるバージョン 2 Images API の**現行**バージョンは ** " "2.5 ** です。 変更点は次のとおりです。" msgid "" "The *Community Images* feature has been introduced in the Images API v2. " "This enables a user to make an image available for consumption by all other " "users. In association with this change, the 'visibility' values for an " "image have been expanded to include 'community' and 'shared'." msgstr "" "*コミュニティイメージ*機能が Images API v2 に導入されました。これにより、ユー" "ザーは他のすべてのユーザーがイメージを利用できるようにすることができます。こ" "の変更に伴い、イメージの「可視性」値が「community」と「shared」を含むように拡" "大されました。" msgid "The *minor* version of the Images API v2 is bumped to **2.5**." msgstr "" "Images API v2 の*マイナー*バージョンは ** 2.5 ** に引き上げられました。" msgid "" "The *minor* version of the Images API v2 is bumped to **2.6** to introduce " "an EXPERIMENTAL version of the API that includes the new calls introduced " "for the Minimal Viable Product delivery of the `refactored image import`_ " "functionality. Version **2.5** remains the CURRENT version of the Images " "API." msgstr "" "`refactored image import`_ 機能の Minimal Viable Product 配信のために導入され" "た新しい呼び出しを含む API の実験的バージョンを導入するために、Images API v2 " "の*マイナー*バージョンは **2.6** に引き上げられました。バージョン **2.5** は " "Image API の現行バージョンのままです。" msgid "" "The Artifacts API was an EXPERIMENTAL API that ran on the Glance service " "endpoint as ``/v3`` in the Liberty release. In the Mitaka release, the " "Glance ``/v3`` EXPERIMENTAL API was deprecated and the Artifacts Service ran " "on its own endpoint (completely independent from the Glance service " "endpoint) as an EXPERIMENTAL API, versioned as ``v0.1``. In both the " "Liberty and Mitaka releases, Glare ran on code stored in the Glance code " "repository and used its own tables in the Glance database." msgstr "" "Artifacts API は、Liberty リリースでは Glance サービスエンドポイント上で ``/" "v3`` として実行された実験的な API でした。Mitaka リリースでは、Glance ``/" "v3`` の実験的 API は廃止予定となり、Artifacts サービスは ``v0.1`` としてバー" "ジョン付された実験的 API として独自のエンドポイント(Glance サービスエンドポ" "イントから完全に独立)で動作しました。Liberty と Mitaka リリースの両方では、" "Glare は Glance コードリポジトリに格納されたコードを実行し、Glance データベー" "スに独自のテーブルを使用しました。" msgid "" "The CURRENT version of the Images API v2 is bumped to **2.6**. The 2.6 API " "was available in the previous (Pike) release as an experimental API to " "introduce the calls necessary for the `interoperable image import " "functionality`_." msgstr "" "Images API v2 の現行バージョンは、**2.6** に引き上げられました。2.6 API は、" "以前の (Pike) リリースで、`相互運用可能なイメージのインポート機能`_ に必要な" "呼び出しを導入する実験的な API として利用可能でした。" msgid "" "The DEPRECATED Images API v1 does not have a concept of \"visibility\", and " "in a \"pure\" v1 deployment, you would not notice that anything had " "changed. 
Since, however, we hope that there aren't many of those around " "anymore, here's what you can expect to see if you use the Images API v1 in a " "\"mixed\" deployment." msgstr "" "廃止予定となった Images API v1 は、「可視性」のコンセプトを持っておらず、v1 " "のみの構成では、何が変更されたか気づくことができません。しかし、そのような構" "成はもうあまり残っていないと思われるため、混在環境で Images API v1 を使用した" "場合に想定されることを次に記します。" msgid "" "The Glance Registry Service and its APIs are officially DEPRECATED in this " "release and are subject to removal at the beginning of the 'S' development " "cycle, following the `OpenStack standard deprecation policy `_." msgstr "" "Glance レジストリーサービスとその API は、このリリースでは正式に廃止予定とな" "り、`OpenStack standard deprecation policy `_ に従って、'S' 開" "発サイクルの最初に削除される可能性があります。" msgid "" "The Glance documentation section `Running Glance in HTTPD`_ outlines some " "approaches to use (and not to use) Glance with the Apache httpd server. This " "is the way Glance is configured as a WSGI application in devstack, so it's " "the method with which we've had the most experience. If you try deploying " "Glance using a different web server, please consider contributing your " "findings to the Glance documentation." msgstr "" "Glance のドキュメンテーションの `HTTPD で Glance を実行する`_ セクションで" "は、Apache の httpd サーバで Glance を使用する(使用しない)方法の概要が記載" "されています。これは、Glance が devstack の WSGI アプリケーションとして設定さ" "れる方法です。そのため、これは私たちが最も経験を積んだ方法です。別の Web サー" "バを使用して Glance を配備しようとする場合は、結果を Glance のドキュメントに" "投稿することを検討してください。" msgid "" "The Image Service API Reference has been updated with a section on the " "`Interoperable image import`_ process (also known as \"image import " "refactored\") and the API calls that are exposed to implement it in the " "EXPERIMENTAL v2.6 of the API." msgstr "" "イメージサービス API リファレンスは、`相互運用可能なイメージのインポート`_ プ" "ロセス(「イメージインポートリファクタリング」とも呼ばれる)セクションで更新" "され、その API 呼び出しが実験的な API v2.6 で実装するために公開されました。" msgid "" "The Images (Glance) version 1 API has been DEPRECATED. Please see " "deprecations section for more information." msgstr "" "Images (Glance) バージョン 1 API は廃止予定となりました。詳細は、廃止予定の機" "能セクションを参照してください。" msgid "" "The `Tasks API`_ was made admin-only by default in Mitaka by restricting the " "following policy targets to **role:admin**: **get_task**, **get_tasks**, " "**add_task**, and **modify_task**." msgstr "" "`Tasks API`_ は、Mitaka では、以下のポリシーターゲットを **role:admin** に制" "限することによってデフォルトで管理者のみになっていました: **get_task**、" "**get_tasks**、**add_task**、**modify_task**" msgid "" "The ``cursive`` library is an OpenStack project which implements OpenStack-" "specific verification of digital signatures." msgstr "" "``cursive`` ライブラリーは、電子署名の OpenStack 固有な検証を実装した " "OpenStack プロジェクトです。" msgid "" "The ``db_downgrade`` command has been removed from the ``glance-manage`` " "utility and all database downgrade scripts have been removed. In accord " "with OpenStack policy, Glance cannot be downgraded any more. Operators are " "advised to make a full database backup of their production data before " "attempting any upgrade." msgstr "" "``db_downgrade`` コマンドが ``glance-manage`` ユーティリティから削除され、す" "べてのデータベースダウングレードスクリプトが削除されました。OpenStack のポリ" "シーによれば、Glance はもうダウングレードできません。オペレーターは、アップグ" "レードをしようとする前に、本番データの完全なデータベースのバックアップを作成" "することをおすすめします。" msgid "" "The ``default`` policy in ``policy.json`` now uses the admin role rather " "than any role. This is to make the policy file restrictive rather than " "permissive and tighten security."
msgstr "" "``policy.json`` の ``default`` ポリシーは、任意のロールではなく管理者ロールを" "使用します。これは、ポリシーファイルを許可的でなく制限的にし、セキュリティを" "強化するためです。" msgid "" "The ``disk_format`` config option enables ``ploop`` as supported by default." msgstr "" "``disk_format`` 設定オプションは、デフォルトでサポートされている ``ploop`` を" "有効にします。" msgid "" "The ``disk_format`` config option enables ``vhdx`` as supported by default." msgstr "" "``disk_format`` 設定オプションは、デフォルトでサポートされている ``vhdx`` を" "有効にします。" msgid "" "The ``enable_image_import`` configuration option was introduced as " "DEPRECATED in Pike and will be removed in Rocky." msgstr "" "Pike で廃止予定となった ``enable_image_import`` 設定オプションは、Rocky で削" "除される予定です。" msgid "The ``s3`` store driver has been removed." msgstr "``s3`` ストアドライバーは削除しました。" msgid "" "The `documentation was reorganized`_ in accord with the new standard layout " "for OpenStack projects." msgstr "" "OpenStack プロジェクトの新しい標準レイアウトに従い、`ドキュメントを再構築しま" "した`_。" msgid "" "The ability to update an image to have 'community' visibility is governed by " "a policy target named 'communitize_image'. The default is empty, that is, " "any user may communitize an image." msgstr "" "イメージを更新して 'community' 可視性を持たせる機能は、'communitize_image' と" "いう名前のポリシーターゲットによって管理されます。デフォルトは空です。つま" "り、どのユーザーもイメージを共有できます。" msgid "" "The change in migration engine has been undertaken in order to enable zero-" "downtime database upgrades, which are part of the effort to implement " "rolling upgrades for Glance (scheduled for the Pike release)." msgstr "" "Glance のローリングアップグレード (Pike リリース予定) の実装の一環であるゼロ" "ダウンタイムデータベースのアップグレードを可能にするために、移行エンジンの変" "更が行われました。" msgid "" "The database migration engine used by Glance for database upgrades has been " "changed from *SQLAlchemy Migrate* to *Alembic* in this release." msgstr "" "Glance がデータベースのアップグレードに使用したデータベース移行エンジンが、こ" "のリリースの *SQLAlchemy Migrate* から *Alembic* に変更されました。" msgid "" "The default value of 'shared' may seem weird, but it preserves the pre-" "upgrade workflow of: (1) create an image with default visibility, (2) add " "members to that image. Further, an image with a visibility of 'shared' that " "has no members is not accessible to other users, so it is functionally a " "private image." msgstr "" "デフォルト値の 'shared' は奇妙に見えるかもしれませんが、(1) デフォルトの可視" "性を持つイメージを作成し、(2) そのイメージにメンバーを追加する、というアップ" "グレード前のワークフローを補助します。さらに、可視性が 'shared' で、メンバー" "を持たないイメージには他のユーザーからアクセスできないため、機能的には非公開" "イメージです。" msgid "" "The deprecation path for the configuration option " "``show_multiple_locations`` has been changed because the mitigation " "instructions for `OSSN-0065`_ refer to this option. It is now subject to " "removal on or after the **Pike** release. The help text for this option has " "been updated accordingly." msgstr "" "設定オプション ``show_multiple_locations`` の廃止予定のパスは、`OSSN-0065`_ " "の緩和指示がこのオプションを参照するため変更されました。**Pike** リリース以降" "に削除される可能性があります。このオプションのヘルプテキストは、それに応じて" "更新されました。" msgid "" "The discovery calls defined in the `refactored image import spec`_ remain in " "an abbreviated form in this release." msgstr "" "`refactored image import spec`_ で定義されている探索呼び出しは、このリリース" "では省略形のままです。" msgid "" "The following are some highlights of the bug fixes included in this release." 
msgstr "以下は、このリリースに含まれるバグ修正のハイライトの一部です。" msgid "" "The following metadata definitions have been modified in the Pike release:" msgstr "Pike リリースでは、以下のメタデータ定義が変更されています:" msgid "" "The following metadata definitions have been modified in the Queens release:" msgstr "Queens リリースでは、以下のメタデータ定義が変更されています:" msgid "" "The glance configuration options have been improved with detailed help " "texts, defaults for sample configuration files, explicit choices of values " "for operators to choose from, and a strict range defined with ``min`` and " "``max`` boundaries." msgstr "" "Glance 設定オプションは、詳細なヘルプテキスト、サンプル設定ファイルのデフォル" "ト、オペレータが明示的に選択できる値、``min`` と ``max`` で定義された厳密な範" "囲により改善されました。" msgid "" "The identifier ``ploop`` has been added to the list of supported disk " "formats in Glance. The respective configuration option has been updated and " "the default list shows ``ploop`` as a supported format." msgstr "" "Glance のサポートされているディスクフォーマットのリストに識別子 ``ploop`` が" "追加されました。それぞれの設定オプションが更新され、デフォルトのリストはサ" "ポートされている形式として ``ploop`` を表示します。" msgid "" "The identifier ``vhdx`` has been added to the list of supported disk formats " "in Glance. The respective configuration option has been updated and the " "default list shows ``vhdx`` as a supported format." msgstr "" "Glance のサポートされているディスクフォーマットのリストに識別子 ``vhdx`` が追" "加されました。それぞれの設定オプションが更新され、デフォルトのリストはサポー" "トされている形式として ``vhdx`` を表示します。" msgid "" "The image signature verification feature has been updated to follow the " "\"sign-the-data\" approach, which uses a signature of the image data " "directly. The prior deprecated \"sign-the-hash\" approach, which uses a " "signature of an MD5 hash of the image data, has been removed." msgstr "" "イメージ署名検証機能は、イメージデータの署名を直接使用する「sign-the-data」ア" "プローチに従うように更新されました。イメージデータの MD5 ハッシュの署名を使用" "する、以前に廃止予定になった「sign-the-hash」アプローチは削除されました。" msgid "" "The image-create operation allows a visibility to be set at the time of " "image creation. This option was probably not used much given that " "previously there were only two visibility values available, one of which " "('public') is by default unassignable by end users. Operators may wish to " "update their documentation or tooling to specify a visibility value when end " "users create images. To summarize:" msgstr "" "イメージ作成操作では、イメージ作成時に可視性を設定することができます。以前は " "2 つの可視性しか利用できなかったことを考えると、このオプションはおそらくあま" "り使われていないでしょう。そのうちの 1 つ ('public') は、デフォルトではエンド" "ユーザーによって割り当て解除できません。オペレーターは、エンドユーザーがイ" "メージを作成する際に、ドキュメントやツールを更新して可視性の値を指定したい場" "合があります。 要約すると:" msgid "" "The image-list call to the Images v2 API now recognizes a ``protected`` " "query-string parameter. This parameter accepts only two values: either " "``true`` or ``false``. The filter is case-sensitive. Any other value will " "result in a 400 response to the request. See the `protected filter " "specification`_ document for details." msgstr "" "Image v2 API への image-list の呼び出しで、クエリーストリングのパラメータ " "``protected`` が認識されるようになりました。このパラメータは、``true`` また" "は ``false`` の2つの値しか受け付けません。フィルターは大文字と小文字を区別し" "ます。他の値を指定すると、要求に対する応答が 400 になります。詳細は、" "`protected filter specification`_ ドキュメントを参照してください。" msgid "" "The initial implementation of the image signature verification feature in " "Glance was insecure, because it relied on an MD5 hash of the image data. " "More details can be found in bug 1516031. This \"sign-the-hash\" approach " "was deprecated in Mitaka, and has been removed in Newton. Related " "CVE-2015-8234." 
msgstr "" "Glance のイメージ署名検証機能の初期の実装は、イメージデータの MD5 ハッシュに" "依存していたため、安全ではありませんでした。詳細は、バグ 1516031 を参照してく" "ださい。この「sign-the-hash」アプローチは、Mitaka では廃止予定となり、Newton " "では削除されました。VE-2015-8234 に関連します。" msgid "" "The interoperable image import functionality uses the Glance tasks engine. " "This is transparent to end users, as they do *not* use the Tasks API for the " "interoperable image import workflow. The operator, however, must make sure " "that the following configuration options are set correctly." msgstr "" "相互運用可能なイメージのインポート機能は、Glance タスクエンジンを使用します。" "これは、相互運用可能なイメージインポートワークフローのために Tasks API を使用" "*しない*ため、エンドユーザーにとって透過的です。ただし、オペレーターは次の設" "定オプションが正しく設定されていることを確認する必要があります。" msgid "" "The latest release of glance_store library does not have the support for the " "``s3`` driver. All code references of the same have been removed from the " "library. As this release of Glance uses the updated glance_store library, " "you will find the ``s3`` driver support removed from Glance too. For example " "the Glance image location strategy modules no longer offer the ``s3`` driver " "support." msgstr "" "glance_store ライブラリの最新リリースでは、``s3`` ドライバーのサポートはあり" "ません。これに対するすべてコード参照がライブラリから削除されています。Glance " "の今回のリリースでは、更新された glance_store ライブラリが使用されているの" "で、``s3`` ドライバーサポートも Glance から削除されています。例えば、Glance " "イメージロケーションストラテジーモジュールは、もはや ``s3`` ドライバーサポー" "トを提供しません。" msgid "" "The lock_path config option from oslo.concurrency is now required for using " "the sql image_cache driver. If one is not specified it will default to the " "image_cache_dir and emit a warning." msgstr "" "oslo.concurrency の lock_path 設定オプションは、sql image_cache ドライバーの" "使用に必要です。指定されていない場合は、デフォルトで image_cache_dir になり、" "警告が出力されます。" msgid "" "The metadata definition for ``hypervisor_type`` in the ``OS::Compute::" "Hypervisor`` namespace has been extended to include the Virtuozzo " "hypervisor, designated as ``vz``. You may upgrade the definition using:" msgstr "" "``OS :: Compute :: Hypervisor`` 名前空間の ``hypervisor_type`` のメタデータ定" "義は、``vz`` として指定された Virtuozzo ハイパーバイザーを含むように拡張され" "ました。次のように定義をアップグレードすることができます。" msgid "" "The metadefs schemas for 'property', 'properties', 'tag', 'tags', 'object', " "and 'objects' previously specified a 'name' element of maximum 255 " "characters. Any attempt to add a name of greater than 80 characters in " "length, however, resulted in a 500 response. The schemas have been corrected " "to specify a maximum length of 80 characters for the 'name' field." msgstr "" "'property'、'properties'、'tag'、'tags'、'object'、'objects' のメタデータス" "キーマは、以前は最大 255 文字の 'name' 要素を指定していました。しかし、長さ" "が 80 文字を超える名前を追加しようとすると、500 応答になりました。スキーマが" "修正され、'name' フィールドに最大 80 文字の長さが指定されました。" msgid "" "The migration of image visibility assigns sensible values to images, namely, " "'private' to images that end users have *not* assigned members, and 'shared' " "to those images that have members at the time of the upgrade. Previously, " "if an end user wanted to share a private image, a member could be added " "directly. After the upgrade, the image will have to have its visibility " "changed to 'shared' before a member can be assigned." 
msgstr "" "イメージの可視性の移行は、アップグレード時に妥当な値をイメージに割り当てま" "す。すなわち、エンドユーザーがメンバーを割り当てて*いない*イメージに " "'private' を割り当て、メンバーを持つイメージに 'shared' を割り当てます。 以前" "は、エンドユーザーがプライベートのイメージを共有したい場合、メンバーを直接追" "加することができました。アップグレード後は、メンバーシップを割り当てる前に、" "イメージの可視性を 'shared' に変更する必要があります。" msgid "" "The new ``tasks_api_access`` policy target directly controls access to the " "Tasks API, whereas targets just mentioned indirectly affect what can be " "manipulated via the API by controlling what operations can be performed on " "Glance's internal task objects. The key point is that if you want to expose " "the new interoperable image import process to end users while keeping the " "Tasks API admin-only, you can accomplish this by using the following " "settings:" msgstr "" "新しい ``tasks_api_access`` ポリシーターゲットは Tasks API へのアクセスを直接" "制御しますが、ターゲットは Glance の内部タスクオブジェクトに対して実行できる" "操作を制御することで、API 経由で操作できるものに間接的に影響します。要点は、" "Tasks API を管理者専用にしつつ、新しい相互運用可能なイメージのインポートプロ" "セスをエンドユーザーに公開したい場合は、次の設定を使用してこれを実現できま" "す。" msgid "" "The property ``img_hide_hypervisor_id`` has been added to the namespace " "``OS::Compute::LibvirtImage``." msgstr "" "プロパティ ``img_hide_hypervisor_id`` が名前空間 ``OS::Compute::" "LibvirtImage`` に追加されました。" msgid "" "The property img_linked_clone_ has been added to the namespace ``OS::" "Compute::VMware``." msgstr "" "プロパティ img_linked_clone_ が名前空間 ``OS::Compute::VMware`` に追加されま" "した。" msgid "" "The sample configuration files autogenerated using the oslo-config-generator " "tool now give consistent ordering of the store drivers configurations." msgstr "" "oslo-config-generator ツールを使用して自動生成されたサンプル設定ファイルで" "は、ストアードライバーの設定は一貫して順序付けられます。" msgid "" "The store drivers configuration order in the sample or autogenerated files " "should be expected to be alphabetical as - ``cinder``, ``filesystem``, " "``http``, ``rbd``, ``sheepdog``, ``swift``, ``vmware``." msgstr "" "サンプルまたは自動生成されたファイルのストアードライバー設定の順序は、" "``cinder``、``filesystem``、``http``、``rbd``、``sheepdog``、``swim``、" "``vmware`` のようにアルファベット順になります。" msgid "" "The task API is being deprecated and it has been made admin only. If " "deployers of Glance would like to have this API as a public one, it is " "necessary to change the `policy.json` file and remove `role:admin` from " "every `task` related field." msgstr "" "タスク API は廃止予定となり、管理者のみになりました。Glance のデプロイヤーが" "この API を公開したい場合は、`policy.json` ファイルを変更し、` task` 関連の各" "フィールドから `role:admin` を削除する必要があります。" msgid "" "The task API was added to allow users for uploading images asynchronously " "and for deployers to have more control in the upload process. Unfortunately, " "this API has not worked the way it was expected to. Therefore, the task API " "has entered a deprecation period and it is meant to be replaced by the new " "import API. This change makes the task API admin only by default so that it " "is not accidentally deployed as a public API." msgstr "" "タスク API は、ユーザーがイメージを非同期にアップロードしたり、デプロイヤーが" "アップロードプロセスをより詳細に制御できるようにするために追加されました。残" "念ながら、この API は期待されたとおりに機能していません。したがって、タスク " "API は廃止予定期間に入り、新しいインポート API に置き換えられます。この変更に" "より、デフォルトではタスク API 管理者のみ作成されるため、誤って公開 API とし" "てデプロイされることはありません。" msgid "" "The use_user_token, admin_user, admin_password, admin_tenant_name, auth_url, " "auth_strategy and auth_region options in the [DEFAULT] configuration section " "in glance-api.conf are deprecated, and will be removed in the O release. 
See " "https://wiki.openstack.org/wiki/OSSN/OSSN-0060" msgstr "" "glance-api.conf の [DEFAULT] 設定セクションにある use_user_token、" "admin_user、admin_password、admin_tenant_name、auth_url、auth_strategy、およ" "び auth_region オプションは廃止予定となり、O リリースで削除予定です。 " "https://wiki.openstack.org/wiki/OSSN/OSSN-0060 を参照してください。" msgid "" "The version 2.6 API is being introduced as EXPERIMENTAL because it is a " "Minimal Viable Product delivery of the functionality described in the " "`refactored image import`_ specification. As an MVP, the responses " "described in that specification are abbreviated in version 2.6. It is " "expected that version 2.6 will be completed in Queens, but at this time, we " "encourage operators to try out the new functionality, but keep in mind its " "EXPERIMENTAL nature." msgstr "" "バージョン 2.6 API は、`refactored image import`_ 仕様で説明されている機能の " "Minimal Viable Product であるため、実験的に導入されています。MVP としては、そ" "の仕様に記述されている応答はバージョン 2.6 で省略されています。Queens でバー" "ジョン 2.6 が完成する想定であり、現時点では、オペレーターに新しい機能を試して" "みることを奨励しますが、実験的な性質を念頭に置いてください。" msgid "" "There are some limitations with this method of deploying Glance and we do " "not recommend its use in production environments at this time. See the " "`Known Issues`_ section of this document for more information." msgstr "" "Glance を配備するこの方法にはいくつかの制限があり、現時点では本番環境での使用" "を推奨していません。詳細については、このドキュメントの`既知の問題`_セクション" "を参照してください。" msgid "" "There was a bug in the **experimental** zero-downtime database upgrade path " "introduced in the Ocata release that prevented the **experimental** upgrade " "from working. This has been fixed in the Pike release. The bug did not " "affect the the normal database upgrade operation." msgstr "" "Ocata リリースで導入された**実験的**ゼロ・ダウンタイム・データベース・アップ" "グレード・パスにバグがあり、**実験的な**アップグレードが機能ませんでした。こ" "れは Pike リリースで修正されています。このバグは、通常のデータベースのアップ" "グレード操作には影響しませんでした。" msgid "" "This change applies only to operators using the VMware datastore or " "filesystem stores" msgstr "" "この変更は、VMware データストアーまたはファイルシステムストアーを使用するオペ" "レータにのみ適用されます。" msgid "" "This change applies only to operators who are using multiple image locations" msgstr "" "この変更は、複数のイメージロケーションを使用するオペレータにのみ適用されま" "す。" msgid "This change applies only to the ``store_type_preference`` option" msgstr "この変更は ``store_type_preference`` オプションにのみ適用されます。" msgid "" "This change is backward compatible, that is, the old names will be " "recognized by the code during the deprecation period. Support for the " "deprecated names will be removed in the **Pike** release" msgstr "" "この変更は下位互換性のため、つまり、廃止予定期間では古い名前がコードによって" "認識されます。廃止予定の名前のサポートは **Pike** リリースで削除されます。" msgid "" "This feature is enabled by default, but it is optional. Whether it is " "offered at your installation depends on the value of the " "``enabled_import_methods`` configuration option in the ``glance-api.conf`` " "file (assuming, of course, that you have not disabled image import at your " "site)." msgstr "" "この機能はデフォルトで有効になっていますが、オプションです。インストール時に" "提供されるかどうかは、``glance-api.conf`` ファイルの " "``enabled_import_methods`` 設定オプションの値によって異なります(もちろん、あ" "なたのサイトでイメージのインポートを無効にしていないと仮定します)。" msgid "" "This has necessitated a change in the location and naming convention for " "migration scripts. Developers, operators, and DevOps are strongly " "encouraged to read through the `Database Management`_ section of the Glance " "documentation for details of the changes introduced in the Ocata release. 
" "Here's a brief summary of the changes:" msgstr "" "これにより、移行スクリプトの場所と命名規則の変更が必要になりました。開発者、" "オペレーター、および DevOps は、Ocata リリースで導入された変更の詳細につい" "て、Glance のドキュメントの `Database Management`_ セクションを読むことを強く" "お勧めします。変更の概要は次のとおりです。" msgid "" "This is a good time to review your Glance ``policy.json`` file to make sure " "that if it contains a ``default`` target, the rule is fairly restrictive " "(\"role:admin\" or \"!\" are good choices). The ``default`` target is used " "when the policy engine cannot find the target it's looking for. This can " "happen when a new policy is introduced but the policy file in use is from a " "prior release." msgstr "" "Glance の ``policy.json`` ファイルを見直して ``default`` ターゲットが含まれて" "いるかどうか確認する良い機会です。ルールはかなり制限されています ( \"role:" "admin\" や \"!\" は良い選択です)。``default`` ターゲットは、ポリシーエンジン" "が探しているターゲットを見つけることができない場合に使用されます。これは、新" "しいポリシーが導入されたが、使用中のポリシーファイルが以前のリリースのもので" "ある場合に発生する可能性があります。" msgid "" "This point release contains minor changes to keep the Ocata release of " "Glance stable with respect to current operating system packages." msgstr "" "このポイントリリースには、現在のオペレーティングシステムパッケージを尊重す" "る、Glance の Ocata リリースを安定に保つための小さな変更が含まれています。" msgid "" "This point release contains minor changes to keep the Pike release of Glance " "stable with respect to current operating system packages." msgstr "" "このポイントリリースには、現在のオペレーティングシステムパッケージを尊重す" "る、Glance の Pike リリースを安定に保つための小さな変更が含まれています。" msgid "This release has impact on API behavior." msgstr "このリリースは、API の動作に影響があります。" msgid "" "This release prevents non-admin user to change 'size' and 'checksum' " "properties of an image after it has been deactivated via Images API v1." msgstr "" "このリリースでは、Images API v1 を使用して無効にされた後のイメージの 'size' " "と 'checksum' のプロパティを、管理者以外のユーザーは変更できなくなりました。" msgid "" "This renders the configuration options incapable of taking certain values " "that may have been accepted before but were actually invalid." msgstr "" "これにより、設定オプションは、以前は受け入れ可能だが実際は無効だった特定の値" "を受け入れなくなります。" msgid "" "This will result into a non-backward compatible experience before and after " "**Newton** release, for users using ``add`` feature to image locations." msgstr "" "これにより、イメージの場所に``追加``機能を使用しているユーザーには、" "**Newton** リリース前後で後方互換性のない経験が得られます。" msgid "" "Those images currently with 'private' visibility (that is, images for which " "'is_public' is False in the database) and that have **no** image members, " "will have their visibility set to 'private'." msgstr "" "'private' 可視性を持ち (つまり、'is_public' がデータベースで False であるイ" "メージ) 、かつイメージメンバーが**いない**イメージは、可視性が 'private' に設" "定されます。" msgid "" "To partially fix an important image locations bug 1587985, an API impacting " "change has been merged into Glance." msgstr "" "重要なイメージの場所のバグ 1587985 を部分的に修正するために、API に影響を与え" "る変更が Glance にマージされました。" msgid "" "To summarize: end users do **not** need access to the Tasks API in order to " "use the new interoperable image import process. They do, however, need " "permission to access internal Glance task objects." msgstr "" "要約すると、エンドユーザーは、新しい相互運用可能なイメージインポートプロセス" "を使用するために Tasks API にアクセスする必要は**ありません**。ただし、内部" "の Glance タスクオブジェクトにアクセスする権限が必要です。" msgid "Translations have been synced from Zanata." msgstr "翻訳は Zanata から同期されます。" msgid "Translations have been updated." msgstr "翻訳が更新されました。" msgid "" "Until now every run of the oslo-config-generator resulted in random ordering " "of the store drivers configuration. After **Newton** release this order will " "remain consistent." 
msgstr "" "これまでは、oslo-config-generator を実行する度に、ストアードライバーの設定が" "ランダムに順序付けされていました。**Newton** リリース後、この順序は一貫してい" "ます。" msgid "" "Until now, no image status checks were in place while **adding** a location " "on it. In some circumstances, this may result in a bad user experience. It " "may also cause problems for a security team evaluating the condition of an " "image in ``deactivated`` status." msgstr "" "これまでは、場所を**追加**するときに、イメージのステータスチェックは行われて" "いませんでした。場合によっては、これによりユーザーエクスペリエンスが低下する" "可能性があります。また、セキュリティチームが ``deactivated`` にあるイメージの" "状態を評価する際に問題を引き起こす可能性があります。" msgid "Upgrade Notes" msgstr "アップグレード時の注意" msgid "" "Use of the plugin requires configuration as described in the `The Image " "Property Injection Plugin`_ section of the Glance Admin Guide." msgstr "" "プラグインを使用するには、Glance Admin Guide の `The Image Property " "Injection Plugin`_ セクションに記載されている設定が必要です。" msgid "" "Use the v1 API to update the image so that ``is_public`` is False. This " "will reset the image's visibility to 'shared', and it will now accept member " "operations." msgstr "" "v1 API を使用してイメージを更新し、``is_public`` が False になるようにしま" "す。これにより、イメージの可視性が 'shared' にリセットされ、メンバー操作を受" "け入れるようになります。" msgid "" "Use the v2 API to change the visibility of the image to 'shared'. Then it " "will accept members in either the v1 or v2 API." msgstr "" "v2 API を使用して、イメージの可視性を 'shared' に変更します。その後、v1 また" "は v2 API でメンバーを受け入れます。" msgid "Using db check" msgstr "db check の使用" msgid "" "Values which do not comply with the new restrictions will prevent the " "service from starting. The logs will contain a message indicating the " "problematic configuration option and the reason why the supplied value has " "been rejected." msgstr "" "新しい制限に準拠していない値は、サービスの開始を妨げます。ログには、問題のあ" "る設定オプションと、指定された値が拒否された理由を示すメッセージが含まれま" "す。" msgid "Visibility migration of current images" msgstr "現在のイメージの可視性の移行" msgid "" "We have tried to minimize the impact upon end users, but want to point out " "some issues to be aware of." msgstr "" "私たちはエンドユーザーへの影響を最小限に抑えようとしましたが、認識すべきいく" "つかの問題を示したいと思います。" msgid "" "We mention this so that you can be aware of this situation in your own " "testing." msgstr "あなた自身のテストでこの状況を知ることができるように、これを示します。" msgid "" "We recommend that all operators adopt the policy settings just described " "independently of the decision whether to expose the EXPERIMENTAL version 2.6 " "API." msgstr "" "実験的なバージョン 2.6 API を公開するかどうかの決定とは関係なく、すべてのオペ" "レータがこのポリシー設定を採用することを推奨します。" msgid "" "We strongly encourage operators to modify their ``glance-api.conf`` files " "immediately to use the **new** names" msgstr "" "我々は、オペレータが ``glance-api.conf`` ファイルを直ちに修正して**新しい**名" "前を使用することを強く推奨します。" msgid "" "When ``enable_image_import`` is **True**, a new import-method, ``web-" "download`` is available. (In Pike, only ``glance-direct`` was offered.) " "Which import-methods you offer can be configured using the " "``enabled_import_methods`` option in the ``glance-api.conf`` file." msgstr "" "``enable_image_import`` が **True** のとき、新しいインポートメソッド ``web-" "download`` が利用できます。( Pike では ``glance-direct`` だけが提供されていま" "した。)\n" " あなたが提供する import-methods は ``glance-api.conf`` ファイルの " "``enabled_import_methods`` オプションを使って設定できます。" msgid "" "While the 2.6 API is CURRENT, whether the interoperable image import " "functionality it makes available is exposed to end users is controlled by a " "configuration option, ``enable_image_import``. Although this option existed " "in the previous release, its effect is slightly different in Queens." 
msgstr "" "2.6 API が現行バージョンである間に、相互運用可能なイメージインポート機能がエ" "ンドユーザーに公開されるかどうかは、設定オプション ``enable_image_import`` に" "よって制御されます。 このオプションは以前のリリースに存在しましたが、その影響" "は Queens では少々異なりますです。" msgid "" "With the deprecation of the Images (Glance) version 1 API in the Newton " "release, it is subject to removal on or after the Pike release. The " "configuration options specific to the Images (Glance) v1 API have also been " "deprecated and are subject to removal. An indirectly related configuration " "option enable_v2_api has been deprecated too as it becomes redundant once " "the Images (Glance) v1 API is removed. Appropriate warning messages have " "been setup for the deprecated configuration options and when the Images " "(Glance) v1 API is enabled (being used). Operators are advised to deploy the " "Images (Glance) v2 API. The standard OpenStack deprecation policy will be " "followed for the removals." msgstr "" "Newton リリースでの Images (Glance) バージョン 1 API の廃止予定に伴い、Pike " "リリース以降での削除対象になりました。Images (Glance) v1 API に固有の設定オプ" "ションも廃予定となり、削除対象です。間接的に関連する設定オプション " "enable_v2_api も、Images (Glance) v1 API が削除されると冗長になるため、廃止予" "定になりました。廃止予定の設定オプション、および Images (Glance) v1 API が有" "効(使用中)の場合、適切な警告メッセージが設定されています。オペレーターは、" "Images (Glance) v2 API を導入することをお勧めします。標準的な OpenStack の廃" "止予定ポリシーに従って、削除される予定です。" msgid "" "You may set the ``timeout`` option in the ``keystone_authtoken`` group in " "the **glance-api.conf** file." msgstr "" "**glance-api.conf** ファイルの ``keystone_authtoken`` グループで ``timeout`` " "オプションを設定することができます。" msgid "You may upgrade these definitions using:" msgstr "これらの定義は、次の方法でアップグレードできます。" msgid "``enable_image_import``" msgstr "``enable_image_import``" msgid "``enable_image_import`` is **True** by default (in Pike it was False)" msgstr "" "``enable_image_import`` は **True** がデフォルトです (Pike では False でした)" msgid "" "``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]``" msgstr "" "``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]``" msgid "``node_staging_uri``" msgstr "``node_staging_uri``" msgid "bug 1532243: glance fails silently if a task flow can not be loaded" msgstr "" "バグ 1532243: タスクフローをロードできない場合、glance は暗黙的に失敗する" msgid "" "bug 1533949: Glance tasks missing configuration item \"conversion_format\"" msgstr "" "バグ 1533949: Glance タスクに設定項目 \"conversion_format\" がありません" msgid "" "bug 1535231: md-meta with case insensitive string has problem during creating" msgstr "" "バグ 1535231: 大文字と小文字を区別しない文字列を含む md-meta は、作成中に問題" "を起こす" msgid "bug 1543937: db-purge fails for very large number" msgstr "バグ 1543937: 非常に大きな数によって、db-purge が失敗する" msgid "bug 1555275: Tags set changes on delete" msgstr "バグ 1555275: 削除時にタグが変更される" msgid "bug 1557495: Possible race conditions during status change" msgstr "バグ 1557495: ステータス変更中の可能な競合状態" msgid "bug 1558683: Versions endpoint does not support X-Forwarded-Proto" msgstr "" "バグ 1558683: Versions エンドポイントが X-Forwarded-Proto をサポートしない" msgid "bug 1568723: secure_proxy_ssl_header not in sample configuration files" msgstr "" "バグ 1568723: secure_proxy_ssl_header がサンプル設定ファイルに存在しない" msgid "" "bug 1568894: glance_store options missing in glance-scrubber.conf and glance-" "cache.conf sample files" msgstr "" "バグ 1568894: glance_store オプションが glance-scrubber.conf と glance-cache." 
"conf サンプルファイルにない" msgid "" "bug 1570789: Metadefs API returns 500 error when 4 byte unicode character is " "passed" msgstr "" "バグ 1570789: 4 バイト unicode 文字列が渡された時、Metadefs API が 500 エラー" "を返す" msgid "" "bug 1580848: There's no exception when import task is created without " "properties" msgstr "" "バグ 1580848: インポートタスクがプロパティなしで作成された時に例外がない" msgid "bug 1582304: Allow tests to run when http proxy is set" msgstr "バグ 1582304: http proxy が設定された時、テストを実行できるようにする" msgid "bug 1584076: Swift ACLs disappears on v1 Glance images" msgstr "バグ 1584076: Swift ACLs が v1 Glance images からなくなった" msgid "" "bug 1584350: etc/glance-registry.conf sample file has redundant store section" msgstr "" "バグ 1584350: etc/glance-registry.conf サンプルファイルに冗長な store セク" "ションがある" msgid "" "bug 1584415: Listing images with the created_at and updated_at filters fails " "if an operator is not specified" msgstr "" "バグ 1584415: created_at および updated_at フィルターを使ったイメージのリスト" "作成で演算子が指定されていないと失敗する" msgid "bug 1585584: Glare v0.1 is unable to create public artifact draft" msgstr "バグ 1585584: Glare v0.1 で public artifact draft が作成できない" msgid "" "bug 1585917: member-create will raise 500 error if member-id is greater than " "255 characters" msgstr "" "バグ 1585917: member-id が 255 文字より大きいと member-create で 500 エラーが" "発生する" msgid "" "bug 1590608: Services should use http_proxy_to_wsgi middleware from oslo." "middleware library" msgstr "" "バグ 1590608: サービスは oslo.middleware から http_proxy_to_wsgi ミドルウェア" "を使用すべき" msgid "" "bug 1591004: Unable to download image with no checksum when cache is enabled" msgstr "" "バグ 1591004: キャッシュが有効の時、チェックサムなしのイメージがダウンロード" "できない" msgid "" "bug 1593177: The default policy needs to be admin for safer default " "deployment scenarios" msgstr "" "バグ 1593177: より安全なデフォルトのデプロイシナリオには、デフォルトのポリ" "シーが管理者になる必要がある" msgid "" "bug 1598985: glance-replicator compare output should show image name in " "addition to image id for missing images" msgstr "" "バグ 1598985: glance-replicator compare output は、消失したイメージにはイメー" "ジ ID に加えてイメージ名を表示すべき" msgid "" "bug 1599169: glance-replicator size raises object of type 'NoneType' has no " "len() exception when no args provided" msgstr "" "バグ 1599169: glance-replicator size は、引数が与えられない場合に 'NoneType' " "タイプのオブジェクトは len() を持たない、という例外を上げる" msgid "bug 1599192: glance-replicator needs to display human-readable size" msgstr "" "バグ 1599192: glance-replicator 人間が読むことができるサイズを表示する必要が" "ある" msgid "bug 1602081: Glance needs to use oslo.context's policy dict" msgstr "バグ 1602081: Glance は oslo.context のポリシー辞書を使う必要がある" msgid "" "bug 1609571: version negotiation api middleware was NOT up to date to " "include v2.3" msgstr "" "バグ 1609571: version negotiation api ミドルウェアは v2.3 を含み最新でない" msgid "bug 1612341: Add cpu thread pinning flavor metadef" msgstr "bug 1612341: CPU スレッドピニングのフレーバーメタ定義を追加。" msgid "bug 1617258: Image signature base64 needs to wrap lines" msgstr "bug 1617258: イメージ署名の base64 は、行を折り返す必要があります。" msgid "the options in the ``[task]`` group" msgstr "``[task]`` グループのオプション" msgid "the options in the ``[taskflow_executor]`` group" msgstr "``[taskflow_executor]`` グループのオプション" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.7862947 glance-29.0.0/releasenotes/source/locale/ko_KR/0000775000175000017500000000000000000000000021313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 
glance-29.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000023100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po0000664000175000017500000003101300000000000026127 0ustar00zuulzuul00000000000000# Sungjin Kang , 2017. #zanata # Andreas Jaeger , 2019. #zanata # Hongjae Kim , 2019. #zanata msgid "" msgstr "" "Project-Id-Version: Glance Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2020-04-09 18:18+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-11-17 04:51+0000\n" "Last-Translator: Andreas Jaeger \n" "Language-Team: Korean (South Korea)\n" "Language: ko_KR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "'community' - the image is available for consumption by all users" msgstr "'community' - 모든 사용자가 이미지를 사용할 수 있습니다." msgid "'private' - the image is accessible only to its owner" msgstr "'private' - 소유자만 접근이 가능합니다." msgid "" "'shared' - the image is completely accessible to the owner and available for " "consumption by any image members" msgstr "" "'shared' - 이미지는 소유자가 완전히 접근할 수 있으며, 모든 이미지 멤버가 사용" "할 수 있습니다." msgid "11.0.1" msgstr "11.0.1" msgid "11.0.2" msgstr "11.0.2" msgid "12.0.0" msgstr "12.0.0" msgid "12.0.0-20" msgstr "12.0.0-20" msgid "13.0.0" msgstr "13.0.0" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "15.0.0" msgstr "15.0.0" msgid "15.0.1" msgstr "15.0.1" msgid "16.0.0" msgstr "16.0.0" msgid "16.0.1" msgstr "16.0.1" msgid "16.0.1-11" msgstr "16.0.1-11" msgid "17.0.0" msgstr "17.0.0" msgid "18.0.0" msgstr "18.0.0" msgid "19.0.0" msgstr "19.0.0" msgid "Add ``ploop`` to the list of supported disk formats." msgstr "지원되는 디스크 포맷 목록에 ``ploop`` 가 추가되었습니다." msgid "Add ``vhdx`` to list of supported disk format." msgstr "지원되는 디스크 포맷 목록에 ``vhdx`` 가 추가되었습니다." msgid "Bug 1483353 v1 Updates using x-image-meta-id header provoke E500 or 200" msgstr "" "Bug 1483353 v1 Updates using x-image-meta-id header provoke E500 or 200" msgid "Bug 1504184 Glance does not error gracefully on token validation error" msgstr "Bug 1504184 Glance does not error gracefully on token validation error" msgid "" "Bug 1505474 Glance raise 500 error when delete images with unallowed status " "change" msgstr "" "Bug 1505474 Glance raise 500 error when delete images with unallowed status " "change" msgid "" "Bug 1505675 Flaky tasks test glance.tests.unit.v2.test_tasks_resource." "TestTasksController.test_create_with_live_time" msgstr "" "Bug 1505675 Flaky tasks test glance.tests.unit.v2.test_tasks_resource." "TestTasksController.test_create_with_live_time" msgid "Bug 1505710 Wrong logging setup in replicator" msgstr "Bug 1505710 Wrong logging setup in replicator" msgid "" "Bug 1512369 glance should declare a test-requirements.txt on swiftclient " "(for config generator)" msgstr "" "Bug 1512369 glance should declare a test-requirements.txt on swiftclient " "(for config generator)" msgid "" "Bug 1517060 Users (without admin privileges) can change ACTIVE_IMMUTABLE " "properties of their own images when deactivated." msgstr "" "Bug 1517060 Users (without admin privileges) can change ACTIVE_IMMUTABLE " "properties of their own images when deactivated."
msgid "" "Bug 1522132 Scrubber tests are broken due to deprecated config " "filesystem_store_datadir under DEFAULT section" msgstr "" "Bug 1522132 Scrubber tests are broken due to deprecated config " "filesystem_store_datadir under DEFAULT section" msgid "Bug Fixes" msgstr "버그 수정" msgid "Critical Issues" msgstr "치명적인 이슈" msgid "Current Series Release Notes" msgstr "현재 시리즈 릴리즈 노트" msgid "" "Deprecated \"sign-the-hash\" approach for image signing. Old run_tests and " "related scripts have been removed." msgstr "" "이미지 서명을 위해 \"sign-the-hash\" 접근 방식이 더 이상 사용되지 않습니다. " "이전 run_tests 와 관련 스크립트가 삭제되었습니다." msgid "Deprecation Notes" msgstr "지원 종료된 기능 노트" msgid "" "Expired tasks are now deleted in Glance. As with other Glance resources, " "this is a \"soft\" deletion, that is, a deleted task is marked as " "``deleted`` in the database so that the task will not appear in API " "responses, but the information associated with the task persists in the " "database." msgstr "" "만료된 작업이 이제 Glance 에서 삭제됩니다. 다른 Glance 리소스와 마찬가지로 이" "것은 \"soft\" 삭제입니다. 즉, 삭제된 작업은 데이터베이스에서 ``deleted`` 로 " "표시되어 작업이 API 응답에 나타나지는 않지만 작업과 관련된 정보는 데이터베이" "스에 계속 유지됩니다." msgid "Expired tasks are now deleted." msgstr "만료된 작업은 이제 삭제됩니다." msgid "For example, ``GET v2/images?visibility=community``" msgstr "예를 들어, ``GET v2/images?visibility=community``" msgid "Glance API **CURRENT** ``minor`` version is now ``2.4``." msgstr "Glance API **CURRENT** ``minor`` 버전은 ``2.4`` 입니다." msgid "Glance API ``minor`` version bumped to 2.4." msgstr "Glance API ``minor`` 버전이 2.4 으로 변경되었습니다." msgid "Glance Release Notes" msgstr "Keystone 릴리즈 노트" msgid "Image 'visibility' changes." msgstr "이미지 'visivility' 가 변경되었습니다." msgid "Impact of the Ocata visibility changes on the Images API v1" msgstr "Ocata 가시성 변경이 Images API v1에 미치는 영향" msgid "Improved configuration option descriptions and handling." msgstr "향상된 구성 옵션 설명 및 처리." msgid "Liberty Series Release Notes" msgstr "Liberty 시리즈 릴리즈 노트" msgid "" "Location updates for images are now restricted to images in ``active`` or " "``queued`` status. Please refer to the \"Bug Fixes\" section for more " "information." msgstr "" "이미지에대한 위치 업데이트는 이제 ``active`` 나 ``queued`` 상태의 이미지로 제" "한됩니다. 자세한 내용은 \"버그 수정\" 절을 참조하십시오." msgid "Mitaka Series Release Notes" msgstr "Mitaka 시리즈 릴리즈 노트" msgid "New Features" msgstr "새로운 기능" msgid "Newton Series Release Notes" msgstr "Newton 시리즈 릴리즈 노트" msgid "" "Note that such images will have to have their visibility updated to 'shared' " "before they will accept members." msgstr "" "Note 이러한 이미지는 멤버가 수락하기 전에 공개 상태가 'shared' 로 업데이트가 " "되어야 합니다." msgid "Note the code name for the \"ceph\" driver is ``rbd``." msgstr "Note \"ceph\" 드라이버의 코드 이름은 ``rdb`` 입니다." msgid "Ocata Series Release Notes" msgstr "Ocata 시리즈 릴리즈 노트" msgid "Other Notes" msgstr "기타 기능" msgid "Security Issues" msgstr "보안 이슈" msgid "Start using reno to manage release notes." msgstr "Reno 를 사용하여 릴리즈 노트를 관리하기 시작하였습니다." msgid "" "The 'visibility' enumeration has been increased from two values (``public``, " "``private``) to four values (``public``, ``private``, ``shared``, and " "``community``)." msgstr "" "'visibility' 열거 값이 두 값 (``public``, ``private``) 에서 네 값 " "(``public``, ``private``, ``shared``, ``community``) 값으로 증가하였습니다." msgid "" "The **CURRENT** version of the version 2 Images API supplied by Glance is " "now **2.5**. Changes include:" msgstr "" "Glance 에서 제공하는 버전 2 Images API 의 **CURRENT** 버전은 현재 **2.5** 입" "니다. 변경사항은 다음과 같습니다:" msgid "The *minor* version of the Images API v2 is bumped to **2.5**." 
msgstr "Imanges API v2 *minor* 버전은 **2.5** 로 변경되었습니다." msgid "Upgrade Notes" msgstr "업그레이드 노트" msgid "" "``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]``" msgstr "" "``glance-manage db load_metadefs [--path ] [--merge] [--prefer_new]``" msgid "bug 1532243: glance fails silently if a task flow can not be loaded" msgstr "bug 1532243: glance fails silently if a task flow can not be loaded" msgid "" "bug 1533949: Glance tasks missing configuration item \"conversion_format\"" msgstr "" "bug 1533949: Glance tasks missing configuration item \"conversion_format\"" msgid "" "bug 1535231: md-meta with case insensitive string has problem during creating" msgstr "" "bug 1535231: md-meta with case insensitive string has problem during creating" msgid "bug 1543937: db-purge fails for very large number" msgstr "bug 1543937: db-purge fails for very large number" msgid "bug 1555275: Tags set changes on delete" msgstr "bug 1555275: Tags set changes on delete" msgid "bug 1557495: Possible race conditions during status change" msgstr "bug 1557495: Possible race conditions during status change" msgid "bug 1558683: Versions endpoint does not support X-Forwarded-Proto" msgstr "bug 1558683: Versions endpoint does not support X-Forwarded-Proto" msgid "bug 1568723: secure_proxy_ssl_header not in sample configuration files" msgstr "bug 1568723: secure_proxy_ssl_header not in sample configuration files" msgid "" "bug 1568894: glance_store options missing in glance-scrubber.conf and glance-" "cache.conf sample files" msgstr "" "bug 1568894: glance_store options missing in glance-scrubber.conf and glance-" "cache.conf sample files" msgid "" "bug 1570789: Metadefs API returns 500 error when 4 byte unicode character is " "passed" msgstr "" "bug 1570789: Metadefs API returns 500 error when 4 byte unicode character is " "passed" msgid "" "bug 1580848: There's no exception when import task is created without " "properties" msgstr "" "bug 1580848: There's no exception when import task is created without " "properties" msgid "bug 1582304: Allow tests to run when http proxy is set" msgstr "bug 1582304: Allow tests to run when http proxy is set" msgid "bug 1584076: Swift ACLs disappears on v1 Glance images" msgstr "bug 1584076: Swift ACLs disappears on v1 Glance images" msgid "" "bug 1584350: etc/glance-registry.conf sample file has redundant store section" msgstr "" "bug 1584350: etc/glance-registry.conf sample file has redundant store section" msgid "" "bug 1584415: Listing images with the created_at and updated_at filters fails " "if an operator is not specified" msgstr "" "bug 1584415: Listing images with the created_at and updated_at filters fails " "if an operator is not specified" msgid "bug 1585584: Glare v0.1 is unable to create public artifact draft" msgstr "bug 1585584: Glare v0.1 is unable to create public artifact draft" msgid "" "bug 1585917: member-create will raise 500 error if member-id is greater than " "255 characters" msgstr "" "bug 1585917: member-create will raise 500 error if member-id is greater than " "255 characters" msgid "" "bug 1590608: Services should use http_proxy_to_wsgi middleware from oslo." "middleware library" msgstr "" "bug 1590608: Services should use http_proxy_to_wsgi middleware from oslo." 
"middleware library" msgid "" "bug 1591004: Unable to download image with no checksum when cache is enabled" msgstr "" "bug 1591004: Unable to download image with no checksum when cache is enabled" msgid "" "bug 1593177: The default policy needs to be admin for safer default " "deployment scenarios" msgstr "" "bug 1593177: The default policy needs to be admin for safer default " "deployment scenarios" msgid "" "bug 1598985: glance-replicator compare output should show image name in " "addition to image id for missing images" msgstr "" "bug 1598985: glance-replicator compare output should show image name in " "addition to image id for missing images" msgid "" "bug 1599169: glance-replicator size raises object of type 'NoneType' has no " "len() exception when no args provided" msgstr "" "bug 1599169: glance-replicator size raises object of type 'NoneType' has no " "len() exception when no args provided" msgid "bug 1599192: glance-replicator needs to display human-readable size" msgstr "bug 1599192: glance-replicator needs to display human-readable size" msgid "bug 1602081: Glance needs to use oslo.context's policy dict" msgstr "bug 1602081: Glance needs to use oslo.context's policy dict" msgid "" "bug 1609571: version negotiation api middleware was NOT up to date to " "include v2.3" msgstr "" "bug 1609571: version negotiation api middleware was NOT up to date to " "include v2.3" msgid "bug 1612341: Add cpu thread pinning flavor metadef" msgstr "bug 1612341: Add cpu thread pinning flavor metadef" msgid "bug 1617258: Image signature base64 needs to wrap lines" msgstr "bug 1617258: Image signature base64 needs to wrap lines" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000023200000000000021044 0ustar00zuulzuul00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/newton.rst0000664000175000017500000000026700000000000021120 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton :earliest-version: 13.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000020663 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020531 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/queens.rst0000664000175000017500000000031300000000000021076 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens :ignore-notes: pike-rc-2-acc173005045e16a.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000020723 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000020716 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020722 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000016000000000000021725 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021125 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/victoria.rst0000664000175000017500000000020700000000000021420 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: victoria-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000020300000000000021227 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: wallaby-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/xena.rst0000664000175000017500000000016700000000000020540 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: xena-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/yoga.rst0000664000175000017500000000016700000000000020544 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. 
release-notes:: :branch: yoga-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/releasenotes/source/zed.rst0000664000175000017500000000016300000000000020363 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: zed-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/requirements.txt0000664000175000017500000000255500000000000016351 0ustar00zuulzuul00000000000000pbr>=3.1.1 # Apache-2.0 defusedxml>=0.7.1 # PSF # < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983 SQLAlchemy>=1.3.14 # MIT eventlet>=0.33.3 # MIT PasteDeploy>=1.5.0 # MIT Routes>=2.3.1 # MIT WebOb>=1.8.1 # MIT alembic>=0.9.6 # MIT httplib2>=0.9.1 # MIT oslo.config>=8.1.0 # Apache-2.0 oslo.concurrency>=4.5.1 # Apache-2.0 oslo.context>=2.22.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.utils>=4.7.0 # Apache-2.0 stevedore!=3.0.0,>=1.20.0 # Apache-2.0 futurist>=1.2.0 # Apache-2.0 taskflow>=4.0.0 # Apache-2.0 keystoneauth1>=3.4.0 # Apache-2.0 keystonemiddleware>=5.1.0 # Apache-2.0 WSME>=0.8.0 # MIT PrettyTable>=0.7.1 # BSD # For paste.util.template used in keystone.common.template Paste>=2.0.2 # MIT jsonschema>=3.2.0 # MIT python-keystoneclient>=3.8.0 # Apache-2.0 oslo.db>=5.0.0 # Apache-2.0 oslo.i18n>=5.0.0 # Apache-2.0 oslo.limit>=1.6.0 # Apache-2.0 oslo.log>=4.5.0 # Apache-2.0 oslo.messaging>=5.29.0,!=9.0.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 oslo.policy>=4.4.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 # Glance Store glance-store>=2.3.0 # Apache-2.0 debtcollector>=1.19.0 # Apache-2.0 cryptography>=2.6.1 # BSD/Apache-2.0 cursive>=0.2.1 # Apache-2.0 # timeutils iso8601>=0.1.11 # MIT os-win>=4.0.1 # Apache-2.0 castellan>=0.17.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/setup.cfg0000664000175000017500000000575100000000000014707 0ustar00zuulzuul00000000000000[metadata] name = glance summary = OpenStack Image Service description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/glance/latest/ python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 [files] data_files = etc/glance = etc/glance-api.conf etc/glance-cache.conf etc/glance-manage.conf etc/glance-scrubber.conf etc/glance-api-paste.ini etc/glance/metadefs = etc/metadefs/* packages = glance [entry_points] console_scripts = glance-api = glance.cmd.api:main glance-cache-prefetcher = glance.cmd.cache_prefetcher:main glance-cache-pruner = glance.cmd.cache_pruner:main glance-cache-manage = glance.cmd.cache_manage:main glance-cache-cleaner = glance.cmd.cache_cleaner:main glance-control = glance.cmd.control:main glance-manage = glance.cmd.manage:main glance-replicator = glance.cmd.replicator:main glance-scrubber = 
glance.cmd.scrubber:main glance-status = glance.cmd.status:main wsgi_scripts = glance-wsgi-api = glance.common.wsgi_app:init_app oslo.config.opts = glance.api = glance.opts:list_api_opts glance.scrubber = glance.opts:list_scrubber_opts glance.cache= glance.opts:list_cache_opts glance.manage = glance.opts:list_manage_opts glance = glance.opts:list_image_import_opts oslo.config.opts.defaults = glance.api = glance.common.config:set_config_defaults glance.database.metadata_backend = sqlalchemy = glance.db.sqlalchemy.metadata oslo.policy.enforcer = glance = glance.api.policy:get_enforcer oslo.policy.policies = glance = glance.policies:list_rules glance.flows = api_image_import = glance.async_.flows.api_image_import:get_flow import = glance.async_.flows.base_import:get_flow location_import = glance.async_.flows.location_import:get_flow glance.flows.import = convert = glance.async_.flows.convert:get_flow introspect = glance.async_.flows.introspect:get_flow ovf_process = glance.async_.flows.ovf_process:get_flow glance.image_import.plugins = no_op = glance.async_.flows.plugins.no_op:get_flow inject_image_metadata=glance.async_.flows.plugins.inject_image_metadata:get_flow image_conversion=glance.async_.flows.plugins.image_conversion:get_flow image_decompression=glance.async_.flows.plugins.image_decompression:get_flow glance.image_import.internal_plugins = web_download = glance.async_.flows._internal_plugins.web_download:get_flow copy_image = glance.async_.flows._internal_plugins.copy_image:get_flow glance_download = glance.async_.flows._internal_plugins.glance_download:get_flow [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/setup.py0000664000175000017500000000137600000000000014577 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
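# A brief aside on how this thin setup.py works (the sketch below is
# illustrative, not pbr's verbatim output): pbr, pulled in via the
# setup_requires declaration below, reads the metadata, [files], and
# [entry_points] sections from setup.cfg above. At install time a
# console_scripts entry such as
#     glance-manage = glance.cmd.manage:main
# becomes a small generated wrapper script that does roughly:
#
#     import sys
#     from glance.cmd.manage import main
#     sys.exit(main())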
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/test-requirements.txt0000664000175000017500000000143100000000000017316 0ustar00zuulzuul00000000000000# Hacking already pins down pep8, pyflakes and flake8 hacking>=6.1.0,<6.2.0 # Apache-2.0 # Needed for testing coverage!=4.4,>=4.0 # Apache-2.0 ddt>=1.0.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD requests>=2.18.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT psutil>=3.2.2 # BSD oslotest>=3.2.0 # Apache-2.0 stestr>=2.0.0 # Apache-2.0 doc8>=0.8.1 # Apache-2.0 Pygments>=2.2.0 # BSD license boto3>=1.9.199 # Apache-2.0 # Optional packages that should be installed when testing PyMySQL>=0.7.6 # MIT License psycopg2>=2.8.4 # LGPL/ZPL xattr>=0.9.2;sys_platform!='win32' # MIT python-swiftclient>=3.2.0 # Apache-2.0 python-cinderclient>=4.1.0 # Apache-2.0 os-brick>=3.1.0 oslo.privsep>=1.32.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867985.9543142 glance-29.0.0/tools/0000775000175000017500000000000000000000000014216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867955.0 glance-29.0.0/tools/test-setup.sh0000775000175000017500000000616000000000000016675 0ustar00zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should set up their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest function set_conf_line { # parameters: file regex value # check if the regex occurs in the file # If so, replace with the value. # If not, append the value to the end of the file. sudo sh -c "grep -q -e '$2' $1 && \ sed -i 's|$2|$3|g' $1 || \ echo '$3' >> $1" } if $(egrep -q "^.*(centos:centos:|cloudlinux:cloudlinux:|redhat:enterprise_linux:)[78].*$" /etc/*release); then # mysql needs to be started on centos/rhel sudo systemctl restart mariadb.service # postgres setup for centos # make sure to use scram-sha-256 instead of md5 for fips! sudo postgresql-setup --initdb PG_CONF=/var/lib/pgsql/data/postgresql.conf set_conf_line $PG_CONF '^password_encryption =.*' 'password_encryption = scram-sha-256' PG_HBA=/var/lib/pgsql/data/pg_hba.conf set_conf_line $PG_HBA '^local[ \t]*all[ \t]*all.*' 'local all all peer' set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*127.0.0.1\/32.*' 'host all all 127.0.0.1/32 scram-sha-256' set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*::1\/128.*' 'host all all ::1/128 scram-sha-256' sudo systemctl restart postgresql.service fi sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # an anonymous user exists, then it matches first for connections and # other connections from that host will not work.
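# An optional sanity check, not part of the upstream flow: once the CREATE
# USER and GRANT statements below have run, connectivity for the test user
# can be verified with a one-liner such as the following before kicking off
# the test suite.
# mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "SELECT 1;"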
glance-29.0.0/tools/test-setup.sh

#!/bin/bash -xe

# This script will be run by OpenStack CI before unit tests are run;
# it sets up the test system as needed.
# Developers should set up their test systems in a similar way.

# This setup needs to be run as a user that can run sudo.

# The root password for the MySQL database; pass it in via
# MYSQL_ROOT_PW.
DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}

# This user and its password are used by the tests; if you change them,
# your tests might fail.
DB_USER=openstack_citest
DB_PW=openstack_citest

function set_conf_line { # parameters: file regex value
    # Check whether the regex occurs in the file.
    # If so, replace it with the value.
    # If not, append the value to the end of the file.
    sudo sh -c "grep -q -e '$2' $1 && \
        sed -i 's|$2|$3|g' $1 || \
        echo '$3' >> $1"
}

if $(egrep -q "^.*(centos:centos:|cloudlinux:cloudlinux:|redhat:enterprise_linux:)[78].*$" /etc/*release); then
    # mysql needs to be started on centos/rhel
    sudo systemctl restart mariadb.service

    # postgres setup for centos
    # make sure to use scram-sha-256 instead of md5 for fips!
    sudo postgresql-setup --initdb
    PG_CONF=/var/lib/pgsql/data/postgresql.conf
    set_conf_line $PG_CONF '^password_encryption =.*' 'password_encryption = scram-sha-256'
    PG_HBA=/var/lib/pgsql/data/pg_hba.conf
    set_conf_line $PG_HBA '^local[ \t]*all[ \t]*all.*' 'local all all peer'
    set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*127.0.0.1\/32.*' 'host all all 127.0.0.1/32 scram-sha-256'
    set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*::1\/128.*' 'host all all ::1/128 scram-sha-256'
    sudo systemctl restart postgresql.service
fi

sudo -H mysqladmin -u root password $DB_ROOT_PW

# It's best practice to remove anonymous users from the database. If
# an anonymous user exists, it matches first for connections, and
# other connections from that host will not work.
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
    DELETE FROM mysql.user WHERE User='';
    FLUSH PRIVILEGES;
    CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW';
    GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;"

# Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
    SET default_storage_engine=MYISAM;
    DROP DATABASE IF EXISTS openstack_citest;
    CREATE DATABASE openstack_citest CHARACTER SET utf8;"

# Same for PostgreSQL

# The root password for the PostgreSQL database; pass it in via
# POSTGRES_ROOT_PW.
DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave}

# Setup user
root_roles=$(sudo -H -u postgres psql -t -c "
    SELECT 'HERE' from pg_roles where rolname='$DB_USER'")
if [[ ${root_roles} == *HERE ]]; then
    sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'"
else
    sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'"
fi

# Store password for tests
cat << EOF > $HOME/.pgpass
*:*:*:$DB_USER:$DB_PW
EOF
chmod 0600 $HOME/.pgpass

# Now create our database
psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest"
createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest
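Once the script has run, the openstack_citest account should be reachable
from the test host. A quick sanity check, sketched with PyMySQL (the
credentials and database name mirror DB_USER/DB_PW above; this snippet is
not part of the script itself):

# Verify the MySQL account created by test-setup.sh.
import pymysql

conn = pymysql.connect(host='127.0.0.1',
                       user='openstack_citest',
                       password='openstack_citest',
                       database='openstack_citest')
with conn.cursor() as cur:
    cur.execute('SELECT VERSION()')
    print('Connected, server version:', cur.fetchone()[0])
conn.close()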
glance-29.0.0/tools/test_format_inspector.py

#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""This is a helper tool to test Glance's stream-based format inspection."""

# Example usage:
#
# test_format_inspector.py -f qcow2 -v -i ~/cirros-0.5.1-x86_64-disk.img

import argparse
import logging
import sys

from oslo_utils import units

from glance.common import format_inspector
from glance.tests.unit.common import test_format_inspector


def main():
    formats = ['raw', 'qcow2', 'vhd', 'vhdx', 'vmdk', 'vdi']

    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-f', '--format', default='raw',
                        help='Format (%s)' % ','.join(sorted(formats)))
    parser.add_argument('-b', '--block-size', default=65536, type=int,
                        help='Block read size')
    parser.add_argument('--context-limit', default=(1 * 1024), type=int,
                        help='Maximum memory footprint (KiB)')
    parser.add_argument('-i', '--input', default=None,
                        help='Input file. Defaults to stdin')
    parser.add_argument('-v', '--verify', action='store_true',
                        help=('Verify our number with qemu-img '
                              '(requires --input)'))
    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    fmt = format_inspector.get_inspector(args.format)(tracing=args.debug)

    if args.input:
        input_stream = open(args.input, 'rb')
    else:
        input_stream = sys.stdin.buffer

    stream = format_inspector.InfoWrapper(input_stream, fmt)
    count = 0
    found_size = False
    while True:
        chunk = stream.read(int(args.block_size))
        # This could stream to an output destination or stdout for testing
        # sys.stdout.write(chunk)
        if not chunk:
            break
        count += len(chunk)
        if args.format != 'raw' and not found_size and fmt.virtual_size != 0:
            # Print the point at which we've seen enough of the file to
            # know what the virtual size is. This is almost always less
            # than the raw_size
            print('Determined virtual size at byte %i' % count)
            found_size = True

    if fmt.format_match:
        print('Source was %s file, virtual size %i MiB (%i bytes)' % (
            fmt, fmt.virtual_size / units.Mi, fmt.virtual_size))
    else:
        print('*** Format inspector did not detect file as %s' % args.format)

    print('Raw size %i MiB (%i bytes)' % (fmt.actual_size / units.Mi,
                                          fmt.actual_size))
    print('Required contexts: %s' % str(fmt.context_info))
    mem_total = sum(fmt.context_info.values())
    print('Total memory footprint: %i bytes' % mem_total)

    # To make sure we're not storing the whole image, complain if the
    # format inspector stored more than context_limit data
    if mem_total > args.context_limit * 1024:
        print('*** ERROR: Memory footprint exceeded!')

    if args.verify and args.input:
        size = test_format_inspector.get_size_from_qemu_img(args.input)
        if size != fmt.virtual_size:
            print('*** QEMU disagrees with our size of %i: %i' % (
                fmt.virtual_size, size))
        else:
            print('Confirmed size with qemu-img')

    print('Image safety check: %s' % (
        fmt.safety_check() and 'passed' or 'FAILED'))

    if args.input:
        detected_fmt = format_inspector.detect_file_format(args.input)
        print('Detected inspector for image as: %s' % (
            detected_fmt.__class__.__name__))


if __name__ == '__main__':
    sys.exit(main())
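The same inspection can be driven programmatically with the pieces the tool
uses above. A minimal sketch, limited to the APIs the tool itself exercises
(the image path is hypothetical):

# Feed a local image through InfoWrapper, exactly as the tool above does,
# and read the inspector's conclusions afterwards.
from glance.common import format_inspector

fmt = format_inspector.get_inspector('qcow2')()
with open('/tmp/example.qcow2', 'rb') as f:  # hypothetical test image
    stream = format_inspector.InfoWrapper(f, fmt)
    while stream.read(65536):
        pass  # the wrapper feeds each chunk to the inspector as a side effect
print('match=%s virtual_size=%i' % (fmt.format_match, fmt.virtual_size))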
glance-29.0.0/tox.ini

[tox]
minversion = 3.18.0
# python runtimes: https://governance.openstack.org/tc/reference/runtimes/ussuri.html
envlist = functional-py39,py39,pep8
skip_missing_interpreters = true

[testenv]
setenv =
    # NOTE(hemanthm): The environment variable "OS_TEST_DBAPI_ADMIN_CONNECTION"
    # must be set to force oslo.db tests to use a file-based sqlite database
    # instead of the default in-memory database, which doesn't work well with
    # alembic migrations. The file-based database pointed to by the environment
    # variable itself is not used for testing, and is never created. Oslo.db
    # creates another file-based database for testing purposes and deletes it
    # as part of its test clean-up. Think of this environment variable as a
    # clue for oslo.db to use a file-based database.
    OS_TEST_DBAPI_ADMIN_CONNECTION=sqlite:////tmp/placeholder-never-created-nor-used.db
    # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
    SQLALCHEMY_WARN_20=1
usedevelop = True
install_command = python -m pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
deps =
    -r{toxinidir}/test-requirements.txt
    -r{toxinidir}/requirements.txt
commands =
    find . -type f -name "*.pyc" -delete
    stestr run --slowest {posargs}
allowlist_externals =
    bash
    find
    rm
passenv =
    *_proxy
    *_PROXY

[testenv:functional]
setenv =
    TEST_PATH = ./glance/tests/functional
    # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
    SQLALCHEMY_WARN_20=1
commands =
    stestr run {posargs}

[testenv:functional-py{38,39,310,311,312}]
setenv =
    {[testenv:functional]setenv}
commands =
    {[testenv:functional]commands}

[testenv:functional-py38-rbac]
setenv =
    {[testenv:functional]setenv}
    OS_GLANCE_TEST_RBAC_DEFAULTS = True
commands =
    {[testenv:functional]commands}

[testenv:functional-py39-rbac]
setenv =
    {[testenv:functional]setenv}
    OS_GLANCE_TEST_RBAC_DEFAULTS = True
commands =
    {[testenv:functional]commands}

[testenv:genpolicy]
commands =
    oslopolicy-sample-generator --config-file=etc/glance-policy-generator.conf

[testenv:pep8]
commands =
    flake8 {posargs}
    doc8 {posargs}

[testenv:genconfig]
commands =
    oslo-config-generator --config-file etc/oslo-config-generator/glance-api.conf
    oslo-config-generator --config-file etc/oslo-config-generator/glance-scrubber.conf
    oslo-config-generator --config-file etc/oslo-config-generator/glance-cache.conf
    oslo-config-generator --config-file etc/oslo-config-generator/glance-manage.conf
    oslo-config-generator --config-file etc/oslo-config-generator/glance-image-import.conf

[testenv:api-ref]
# This environment is called from CI scripts to test and publish
# the API Ref to docs.openstack.org.
deps = -r{toxinidir}/doc/requirements.txt
commands =
    rm -rf api-ref/build
    sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html

[testenv:bindep]
# Do not install any requirements. We want this to be fast and work even if
# system dependencies are missing, since it's used to tell you what system
# dependencies are missing! This also means that bindep must be installed
# separately, outside of the requirements files, and develop mode disabled
# explicitly to avoid unnecessarily installing the checked-out repo too (this
# further relies on "tox.skipsdist = True" above).
deps = bindep
commands = bindep test
usedevelop = False

[doc8]
ignore-path = .venv,.git,.tox,.eggs,*glance/locale*,*lib/python*,glance.egg*,api-ref/build,doc/build,doc/source/contributor/api,doc/test

[flake8]
# TODO(dmllr): Analyze or fix the warnings blacklisted below
# E402  module level import not at top of file
# E711  comparison to None should be 'if cond is not None:'
# E712  comparison to True should be 'if cond is True:' or 'if cond:'
# H404  multi line docstring should start with a summary
# H405  multi line docstring summary not separated with an empty line
# W503  line break before binary operator - conflicting guidance
# W504  line break after binary operator - conflicting guidance
ignore = E402,E711,E712,H404,H405,W503,W504
enable-extensions = H904
exclude = .venv,.git,.tox,dist,doc,etc,*glance/locale*,*lib/python*,*egg,build
per-file-ignores =
    glance/tests/functional/__init__.py:E501

[hacking]
import_exceptions = glance.i18n

[flake8:local-plugins]
extension =
    G316 = checks:assert_true_instance
    G317 = checks:assert_equal_type
    G318 = checks:assert_equal_none
    G319 = checks:no_translate_debug_logs
    G327 = checks:check_no_contextlib_nested
    G328 = checks:dict_constructor_with_list_copy
    G330 = checks:no_log_warn
paths = ./glance/hacking

[testenv:docs]
deps = -r{toxinidir}/doc/requirements.txt
commands =
    rm -fr doc/build
    rm -fr doc/source/contributor/api
    sphinx-build -W -b html doc/source doc/build/html
    whereto doc/source/_extra/.htaccess doc/test/redirect-tests.txt

[testenv:venv]
deps =
    {[testenv]deps}
    -r{toxinidir}/doc/requirements.txt
commands = {posargs}

[testenv:releasenotes]
deps = -r{toxinidir}/doc/requirements.txt
commands =
    sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

[testenv:cover]
setenv =
    PYTHON=coverage run --source glance --parallel-mode
commands =
    stestr run {posargs}
    coverage combine
    coverage html -d cover
    coverage xml -o cover/coverage.xml

[testenv:debug]
# this will use whatever the system python3 is
commands = oslo_debug_helper {posargs}

[testenv:debug-py38]
commands = oslo_debug_helper {posargs}

[testenv:debug-py39]
commands = oslo_debug_helper {posargs}
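The G3xx codes in the [flake8:local-plugins] section above map to check
functions under ./glance/hacking. For illustration, this is the general
shape of such a check written against the hacking plugin interface; it is a
sketch in the style of G330 (no_log_warn), not glance's actual
implementation:

# Illustrative-only hacking local check; the regex and message are
# assumptions modeled on the G330 entry above.
import re

from hacking import core

_LOG_WARN = re.compile(r'\bLOG\.warn\(')


@core.flake8ext
def no_log_warn(logical_line):
    """G330 - LOG.warn is deprecated, use LOG.warning instead."""
    if _LOG_WARN.search(logical_line):
        yield (0, 'G330: Use LOG.warning() instead of LOG.warn()')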